repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 distinct values) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 distinct value)
---|---|---|---|---|---|---|---|---|---|---|
ishalyminov/memn2n | tf_config.py | 1 | 1480 | import tensorflow as tf
def configure(in_config):
tf.flags.DEFINE_float(
'learning_rate',
in_config['learning_rate'],
'Learning rate for Adam Optimizer'
)
tf.flags.DEFINE_float(
'epsilon',
in_config['epsilon'],
'Epsilon value for Adam Optimizer'
)
tf.flags.DEFINE_float(
'max_grad_norm',
in_config['max_grad_norm'],
'Clip gradients to this norm')
tf.flags.DEFINE_integer(
'evaluation_interval',
in_config['evaluation_interval'],
"Evaluate and print results every x epochs"
)
tf.flags.DEFINE_integer(
'batch_size',
in_config['batch_size'],
'Batch size for training'
)
tf.flags.DEFINE_integer(
'hops',
in_config['hops'],
'Number of hops in the Memory Network'
)
tf.flags.DEFINE_integer(
'epochs',
in_config['epochs'],
'Number of epochs to train for'
)
tf.flags.DEFINE_integer(
'embedding_size',
in_config['embedding_size'],
'Embedding size for embedding matrices'
)
tf.flags.DEFINE_integer(
'memory_size',
in_config['memory_size'],
'Maximum size of memory'
)
tf.flags.DEFINE_integer(
'task_id',
in_config['task_id'],
"bAbI task id, 1 <= id <= 6"
)
tf.flags.DEFINE_integer(
'random_state',
in_config['random_state'],
'Random state'
) | mit | 703,187,977,260,740,600 | 24.534483 | 51 | 0.55473 | false |
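# --- Added usage sketch (not part of the original tf_config.py above) ---
# A minimal, hypothetical illustration of how configure() might be called with
# TensorFlow 1.x, where tf.flags wraps the standard flags module; every value
# in the dict below is made up.
#
#   import tensorflow as tf
#   import tf_config
#
#   tf_config.configure({
#       'learning_rate': 0.01, 'epsilon': 1e-8, 'max_grad_norm': 40.0,
#       'evaluation_interval': 10, 'batch_size': 32, 'hops': 3, 'epochs': 100,
#       'embedding_size': 20, 'memory_size': 50, 'task_id': 1, 'random_state': 42,
#   })
#   FLAGS = tf.flags.FLAGS
#   print(FLAGS.learning_rate)   # -> 0.01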
droundy/deft | papers/thesis-kirstie/figs/plot_LJ_Potential.py | 1 | 1142 | #!/usr/bin/python3
#RUN this program from the directory it is listed in
#with command ./plot_LJ_Potential.py
from scipy import special
import numpy as np
import matplotlib.pyplot as plt
import math
#Plot WCA Potential vs r
#R=1/1.781797436 #for a sigma=1 DOESN'T WORK!! graph wrong shape!
R=1/1.781797436
epsilon=1
sigma=1
#print sigma
#r=np.linspace(.1, 2*R, 200)
#r=np.linspace(.9, 4, 200) #SAVE!!! for plotting r
r=np.linspace(.9, 2.5, 200)
r_dless=sigma/r #plot dimensionless quantity!
sigma_over_r_to_pow6=(r_dless)*(r_dless)*(r_dless)*(r_dless)*(r_dless)*(r_dless)
#V=4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) + epsilon #WCA potential
#V=4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) #LJ potential but looks like WCA
V=4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) #LJ potential
plt.plot(1/r_dless,V)
plt.xlim(right=2.5)
plt.ylim(top=V.max())
plt.xlabel('r/$\sigma$')
#plt.xlabel('r')
plt.ylabel('V(r)/$\epsilon$')
plt.title('Lennard-Jones Potential')
#plt.legend()
plt.savefig("LJ_Potential.pdf")
# plt.show()
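# Added sanity check (not part of the original script): with the reduced units
# used above, V(r=sigma) should be 0 and the minimum should sit at
# r = 2**(1/6)*sigma with depth -epsilon.
r_check = np.array([sigma, 2.0**(1.0/6.0)*sigma])
V_check = 4*epsilon*((sigma/r_check)**12 - (sigma/r_check)**6)
print(V_check) # expected: approximately [ 0. -1.]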
| gpl-2.0 | -5,032,273,101,040,815,000 | 23.826087 | 113 | 0.697023 | false |
FowlerLab/Enrich2 | enrich2/seqlib.py | 1 | 15885 | from __future__ import print_function
import logging
import os.path
import pandas as pd
import numpy as np
from collections import OrderedDict
from matplotlib.backends.backend_pdf import PdfPages
import sys
from .plots import counts_plot
from .storemanager import StoreManager, fix_filename, ELEMENT_LABELS
class SeqLib(StoreManager):
"""
Abstract class for handling count data from a single sequencing library.
"""
# Note: the following block is referenced by line number above
# When adding new messages, update the documentation line numbers also!
filter_messages = OrderedDict(
[
("min quality", "single-base quality"),
("avg quality", "average quality"),
("max N", "excess N bases"),
("chastity", "not chaste"),
("remove unresolvable", "unresolvable mismatch"),
("merge failure", "unable to merge reads"),
("total", "total"),
]
)
store_suffix = "lib"
def __init__(self):
StoreManager.__init__(self)
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__))
self.timepoint = None
self.counts_file = None
self.report_filtered = None
self._filters = dict()
self.filter_stats = dict()
self.default_filters = dict()
self.default_filters.update({"min quality": 0})
self.default_filters.update({"max N": sys.maxsize})
self.default_filters.update({"avg quality": 0})
self.default_filters.update({"chastity": False})
@property
def filters(self):
return self._filters
@filters.setter
def filters(self, config_filters):
"""
Set up the filters dictionary using the options selected in
*config_filters*, filling in missing entries with defaults.
"""
self._filters.clear()
self._filters.update(self.default_filters)
unused = list()
for key in config_filters:
if key in self._filters:
if config_filters[key] is not None:
self._filters[key] = config_filters[key]
else:
unused.append(key)
if len(unused) > 0:
self.logger.warning(
"Unused filter parameters ({})" "".format(", ".join(unused))
)
self.filter_stats.clear()
for key in self._filters:
self.filter_stats[key] = 0
self.filter_stats["total"] = 0
def serialize_filters(self):
"""
Return a dictionary of filtering options that have non-default values.
"""
cfg = dict()
for key in self.filters.keys():
if self.filters[key] != self.default_filters[key]:
cfg[key] = self.filters[key]
return cfg
def _children(self):
"""
These objects have no children. Returns ``None``.
"""
return None
def add_child(self, child):
"""
No children, raises an AttributeError.
"""
raise AttributeError("SeqLib objects do not support adding children")
def remove_child_id(self, tree_id):
"""
No children, raises an AttributeError.
"""
raise AttributeError("SeqLib objects do not support removing children")
def validate(self):
"""
Validates parameters for a configured SeqLib. Currently does nothing.
"""
pass
def has_wt_sequence(self):
"""
Returns whether or not the object has a wild type sequence. Returns
``False`` unless overloaded by a derived class (such as
:py:class:`~seqlib.seqlib.VariantSeqLib`).
"""
return False
def configure(self, cfg):
"""
Set up the object using the config object *cfg*, usually derived from
a ``.json`` file.
"""
StoreManager.configure(self, cfg)
self.logger = logging.getLogger(
"{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
)
try:
self.timepoint = int(cfg["timepoint"])
if "report filtered reads" in cfg:
self.report_filtered = cfg["report filtered reads"]
else:
self.report_filtered = False
if "counts file" in cfg:
self.counts_file = cfg["counts file"]
else:
self.counts_file = None
except KeyError as key:
raise KeyError(
"Missing required config value {key}" "".format(key=key), self.name
)
except ValueError as value:
raise ValueError(
"Invalid parameter value {value}" "".format(value=value), self.name
)
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
"""
cfg = StoreManager.serialize(self)
cfg["timepoint"] = self.timepoint
cfg["report filtered reads"] = self.report_filtered
if self.counts_file is not None:
cfg["counts file"] = self.counts_file
return cfg
def calculate(self):
"""
Pure virtual method that defines how the data are counted.
"""
raise NotImplementedError("must be implemented by subclass")
def report_filtered_read(self, fq, filter_flags):
"""
Write the :py:class:`~fqread.FQRead` object *fq* to the ``DEBUG``
log. The dictionary *filter_flags* contains ``True``
values for each filtering option that applies to *fq*. Keys in
*filter_flags* are converted to messages using the
``SeqLib.filter_messages`` dictionary.
"""
self.logger.debug(
"Filtered read ({messages})\n{read!s}".format(
messages=", ".join(
SeqLib.filter_messages[x] for x in filter_flags if filter_flags[x]
),
name=self.name,
read=fq,
)
)
def save_counts(self, label, df_dict, raw):
"""
Convert the counts in the dictionary *df_dict* into a DataFrame object
and save it to the data store.
If *raw* is ``True``, the counts are stored under
``"/raw/label/counts"``; else ``"/main/label/counts"``.
"""
if len(df_dict.keys()) == 0:
raise ValueError("Failed to count {} [{}]".format(label, self.name))
df = pd.DataFrame.from_dict(df_dict, orient="index", dtype=np.int32)
df.columns = ["count"]
df.sort_values("count", ascending=False, inplace=True)
self.logger.info(
"Counted {n} {label} ({u} unique)".format(
n=df["count"].sum(), u=len(df.index), label=label
)
)
if raw:
key = "/raw/{}/counts".format(label)
else:
key = "/main/{}/counts".format(label)
self.store.put(key, df, format="table", data_columns=df.columns)
del df
def save_filtered_counts(self, label, query):
"""
Filter the counts in ``"/raw/label/counts"`` using the *query* string
and store the result in ``"/main/label/counts"``
For more information on building query strings, see
http://pandas.pydata.org/pandas-docs/stable/io.html#querying-a-table
"""
self.logger.info("Converting raw {} counts to main counts".format(label))
raw_table = "/raw/{}/counts".format(label)
main_table = "/main/{}/counts".format(label)
self.map_table(source=raw_table, destination=main_table, source_query=query)
self.logger.info(
"Counted {n} {label} ({u} unique) after query".format(
n=self.store[main_table]["count"].sum(),
u=len(self.store[main_table].index),
label=label,
)
)
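# Added illustration (not part of the original Enrich2 source): the *query*
# argument is a PyTables/pandas selection string evaluated against the raw
# counts table, so a hypothetical call such as
# seqlib.save_filtered_counts("variants", "count >= 10")
# would keep only raw variants observed at least ten times; the label and the
# threshold here are made up.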
def report_filter_stats(self):
"""
Create report file for the number of filtered reads.
The report file is located in the output directory, named
``SeqLibName.filter.txt``.
It contains the number of reads filtered for each category, plus the
total number filtered.
.. note:: Reads are checked for all quality-based criteria before \
filtering.
"""
with open(
os.path.join(self.output_dir, fix_filename(self.name) + ".filter.txt"), "w"
) as handle:
for key in sorted(
self.filter_stats, key=self.filter_stats.__getitem__, reverse=True
):
if key != "total" and self.filter_stats[key] > 0:
print(
SeqLib.filter_messages[key],
self.filter_stats[key],
sep="\t",
file=handle,
)
print("total", self.filter_stats["total"], sep="\t", file=handle)
self.logger.info("Wrote filtering statistics")
def save_filter_stats(self):
"""
Save a DataFrame containing the number of filtered reads under
``'/raw/filter'``.
This DataFrame contains the same information as ``report_filter_stats``
"""
df = pd.DataFrame(index=SeqLib.filter_messages.values(), columns=["count"])
for key in self.filter_stats.keys():
if self.filter_stats[key] > 0 or key == "total":
df.loc[SeqLib.filter_messages[key], "count"] = self.filter_stats[key]
df.dropna(inplace=True)
self.store.put(
"/raw/filter", df.astype(int), format="table", data_columns=df.columns
)
def read_quality_filter(self, fq):
"""
Check the quality of the FQRead object *fq*.
Checks ``'chastity'``, ``'min quality'``, ``'avg quality'``,
``'max N'``, and ``'remove unresolvable'``.
Counts failed reads for later output and reports the filtered read if
desired.
Returns ``True`` if the read passes all filters, else ``False``.
"""
filter_flags = dict()
for key in self.filters:
filter_flags[key] = False
if self.filters["chastity"]:
if not fq.is_chaste():
self.filter_stats["chastity"] += 1
filter_flags["chastity"] = True
if self.filters["min quality"] > 0:
if fq.min_quality() < self.filters["min quality"]:
self.filter_stats["min quality"] += 1
filter_flags["min quality"] = True
if self.filters["avg quality"] > 0:
if fq.mean_quality() < self.filters["avg quality"]:
self.filter_stats["avg quality"] += 1
filter_flags["avg quality"] = True
if self.filters["max N"] >= 0:
if fq.sequence.upper().count("N") > self.filters["max N"]:
self.filter_stats["max N"] += 1
filter_flags["max N"] = True
if "remove unresolvable" in self.filters: # OverlapSeqLib only
if self.filters["remove unresolvable"]:
if "X" in fq.sequence:
self.filter_stats["remove unresolvable"] += 1
filter_flags["remove unresolvable"] = True
# update total and report if failed
if any(filter_flags.values()):
self.filter_stats["total"] += 1
if self.report_filtered:
self.report_filtered_read(fq, filter_flags)
return False
else:
return True
def make_plots(self):
"""
Make plots that are shared by all :py:class:`~seqlib.seqlib.SeqLib`
objects.
Creates counts histograms for all labels.
"""
if self.plots_requested:
self.logger.info("Creating plots")
pdf = PdfPages(os.path.join(self.plot_dir, "counts.pdf"))
for label in self.labels:
counts_plot(self, label, pdf, log=True)
counts_plot(self, label, pdf, log=False)
pdf.close()
def write_tsv(self):
"""
Write each table from the store to its own tab-separated file.
Files are written to a ``tsv`` directory in the default output
location.
File names are the HDF5 key with ``'_'`` substituted for ``'/'``.
"""
if self.tsv_requested:
self.logger.info("Generating tab-separated output files")
for k in self.store.keys():
self.write_table_tsv(k)
def counts_from_file_h5(self, fname):
"""
If an HDF store containing raw counts has been specified, open the
store, copy those counts into this store, and close the counts store.
Copies all tables in the ``'/raw'`` group along with their metadata.
"""
store = pd.HDFStore(fname)
self.logger.info(
"Using existing HDF5 data store '{}' for raw data" "".format(fname)
)
# this could probably be much more efficient, but the PyTables docs
# don't explain copying subsets of files adequately
raw_keys = [key for key in store.keys() if key.startswith("/raw/")]
if len(raw_keys) == 0:
raise ValueError(
"No raw counts found in '{}' [{}]" "".format(fname, self.name)
)
else:
for k in raw_keys:
# copy the data table
raw = store[k]
self.store.put(k, raw, format="table", data_columns=raw.columns)
# copy the metadata
self.set_metadata(k, self.get_metadata(k, store=store), update=False)
self.logger.info("Copied raw data '{}'".format(k))
store.close()
def counts_from_file_tsv(self, fname):
"""
If a counts file in tsv format has been specified, read the counts into
a new dataframe and save as raw counts.
"""
df = pd.read_table(fname, sep="\t", header=0, index_col=0)
if list(df.columns) != ["count"]:
raise ValueError(
"Invalid column names for counts file [{}]" "".format(self.name)
)
if len(df) == 0:
raise ValueError("Empty counts file [{}]".format(self.name))
label = None
for elem in ELEMENT_LABELS:
if elem in self.labels:
label = elem
break
if label is None:
raise ValueError("No valid element labels [{}]".format(self.name))
key = "/raw/{}/counts".format(label)
self.store.put(key, df, format="table", data_columns=df.columns, dtype=np.int32)
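# Added illustration (not part of the original Enrich2 source): the counts
# file read above is expected to be a tab-separated table with an index
# column followed by a single "count" column, e.g. (values made up):
# sequence<TAB>count
# AAA<TAB>152
# AAC<TAB>37
# pandas then yields a one-column DataFrame indexed by the first column.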
def counts_from_file(self, fname):
"""Get raw counts from a counts file instead of FASTQ_ file.
The ``'/raw/<element>/counts'`` table will be populated using the given
input file. The input file should be a two-column file readable by
``pandas`` as a series or two-column dataframe or an Enrich2 HDF5 file.
If the input file is a two-column file, the index will be checked using
the SeqLib's ``validate_index()`` method.
If the input file is an HDF5 file, the entire set of ``'/raw'`` tables
will be copied over, with the metadata intact.
"""
if not os.path.exists(fname):
raise IOError("Counts file '{}' not found [{}]" "".format(fname, self.name))
elif os.path.splitext(fname)[-1].lower() in (".h5",):
self.counts_from_file_h5(self.counts_file)
elif os.path.splitext(fname)[-1].lower() in (".txt", ".tsv", ".csv"):
self.counts_from_file_tsv(self.counts_file)
else:
raise ValueError(
"Unrecognized counts file extension for '{}' "
"[{}]".format(fname, self.name)
)
| bsd-3-clause | 4,788,003,668,979,767,000 | 36.376471 | 88 | 0.556563 | false |
MRCIEU/melodi | melodi/settings.py | 1 | 8804 | """
Django settings for melodi project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from datetime import timedelta
from celery.schedules import crontab,timedelta
from django.core.urlresolvers import reverse_lazy
import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.secret_key
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
#ALLOWED_HOSTS = []
#Add this for public
ALLOWED_HOSTS = ['melodi.biocompute.org.uk','www.melodi.biocompute.org.uk','melodi.mrcieu.ac.uk']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'browser',
'social_auth',
'django.contrib.humanize'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
#'django.middleware.cache.UpdateCacheMiddleware', #need this for cache
'django.middleware.common.CommonMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware', #need this for cache
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.google.GoogleOAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_ENABLED_BACKENDS = ('google')
LOGIN_URL = '/login/'
LOGIN_ERROR_URL = '/login-error/'
LOGIN_REDIRECT_URL = reverse_lazy('home')
GOOGLE_OAUTH2_CLIENT_ID = '744265706742-h9l3etr7pdboc8d0h0b14biiemtfsbvb.apps.googleusercontent.com'
GOOGLE_OAUTH2_CLIENT_SECRET = 'BsQyz4BxaC82kYD_O5UHcgaF'
#GOOGLE_WHITE_LISTED_DOMAINS = ['bristol.ac.uk']
SOCIAL_AUTH_USER_MODEL = 'auth.User'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'social_auth.context_processors.social_auth_by_type_backends'
)
ROOT_URLCONF = 'melodi.urls'
APPEND_SLASH = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'browser/templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'melodi.wsgi.application'
SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
#'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#}
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '/var/django/melodi/mysql.cnf',
},
}
}
# NEO4J_DATABASES = {
# 'default' : {
# 'HOST':'10.0.2.2',
# 'PORT':7474,
# 'ENDPOINT':'/db/data'
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
#STATIC_ROOT = '/var/django/melodi/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATIC_URL = '/static/'
MEDIA_ROOT = '/var/django/melodi/'
DATA_FOLDER = os.path.join(BASE_DIR,"data/")
# CELERY SETTINGS
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_ACKS_LATE = True
#restart the worker process after every task to avoid memory leaks
CELERYD_MAX_TASKS_PER_CHILD = 1
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
#'level': 'WARNING',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'debug.log'),
#'filename': '/tmp/debug.log',
'formatter': 'verbose'
},
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
},
},
'loggers': {
#'django': {
# 'handlers':['file'],
# 'propagate': True,
# 'level':'INFO',
#},
'celery': {
'handlers': ['console'],
'propagate': False,
'level': 'WARNING',
},
'browser': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
#CACHE_MIDDLEWARE_ALIAS = 'default'
#CACHE_MIDDLEWARE_SECONDS = 60480000
#CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
#"SOCKET_TIMEOUT": 50,
},
"KEY_PREFIX": "melodi",
'TIMEOUT': None
}
}
#CACHES = {
# 'default': {
# #'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
# 'LOCATION': 'melodi_cache',
# 'TIMEOUT': None
# }
#}
CELERYBEAT_SCHEDULE = {
#'t1': {
# 'task': 'tasks.test_scheduler',
# 'schedule': timedelta(seconds=10),
#},
#update pubmed-mesh relationships every day at 3am
'dm': {
'task': 'tasks.daily_mesh',
#'schedule': timedelta(hours=1),
'schedule': crontab(hour=3, minute=0),#
},
#'neo': {
# 'task': 'tasks.neo4j_check',
# #'schedule': timedelta(hours=1),
# 'schedule': timedelta(minutes=30),#
#},
}
# Logging
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'formatters': {
# 'verbose': {
# 'format': '[%(asctime)s] %(levelname)-8s %(process)d %(thread)d %(name)s:%(message)s',
# 'datefmt': '%Y-%m-%d %a %H:%M:%S'
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'verbose'
# },
# 'local_file': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.RotatingFileHandler',
# 'formatter': 'verbose',
# #'filename': '%s/debug.log' % APP_ROOT,
# 'filename': os.path.join(BASE_DIR, 'debug2.log'),
# 'maxBytes': 1024 * 1024 * 10,
# },
# 'syslog': {
# 'level': 'INFO',
# 'class': 'logging.handlers.SysLogHandler',
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler',
# 'include_html': True,
# }
# },
# 'loggers': {
# 'django': {
# 'handlers': ['null'],
# 'propagate': True,
# 'level': 'INFO',
# },
# 'django.request': {
# 'handlers': ['mail_admins', 'console', 'local_file'],
# 'level': 'ERROR',
# 'propagate': False,
# },
# },
# 'root': {
# 'handlers': ['console', 'local_file'],
# 'level': 'DEBUG',
# }
# }
| mit | -2,412,759,635,708,730,400 | 26.860759 | 100 | 0.578828 | false |
JonathanSeguin/Mariana | Mariana/regularizations.py | 1 | 1924 | from Mariana.abstraction import Abstraction_ABC
__all__ = ["SingleLayerRegularizer_ABC", "L1", "L2", "ActivationL1"]
class SingleLayerRegularizer_ABC(Abstraction_ABC) :
"""An abstract regularization to be applied to a layer."""
def apply(self, layer) :
"""Apply to a layer and update networks's log"""
hyps = {}
for k in self.hyperParameters :
hyps[k] = getattr(self, k)
message = "%s uses %s regularization" % (layer.name, self.__class__.__name__)
layer.network.logLayerEvent(layer, message, hyps)
return self.getFormula(layer)
def getFormula(self, layer) :
"""Returns the expression to be added to the cost"""
raise NotImplementedError("must be implemented by subclass")
class L1(SingleLayerRegularizer_ABC) :
"""
Will add this to the cost. Weights will tend towards 0
resulting in sparser weight matrices.
.. math::
factor * abs(Weights)
"""
def __init__(self, factor) :
SingleLayerRegularizer_ABC.__init__(self)
self.factor = factor
self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( abs(layer.parameters["W"]).sum() )
class L2(SingleLayerRegularizer_ABC) :
"""
Will add this to the cost. Causes the weights to stay small
.. math::
factor * (Weights)^2
"""
def __init__(self, factor) :
SingleLayerRegularizer_ABC.__init__(self)
self.factor = factor
self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( (layer.parameters["W"] ** 2).sum() )
class ActivationL1(SingleLayerRegularizer_ABC) :
"""
L1 on the activations. Neurone activations will tend towards
0, resulting into sparser representations.
Will add this to the cost
.. math::
factor * abs(activations)
"""
def __init__(self, factor) :
SingleLayerRegularizer_ABC.__init__(self)
self.factor = factor
self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( abs(layer.outputs).sum() ) | apache-2.0 | -8,295,609,861,615,857,000 | 26.898551 | 79 | 0.692308 | false |
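# --- Added illustration (not part of the original Mariana source) ---
# The penalties above are just scaled norms of the weight matrix; a plain
# NumPy sketch of the same quantities with a made-up factor and weights:
import numpy as np

factor = 0.01 # hypothetical regularization strength
W = np.array([[0.5, -1.0], [2.0, 0.0]]) # hypothetical layer weights
l1_penalty = factor * np.abs(W).sum() # mirrors L1.getFormula: factor * abs(W).sum()
l2_penalty = factor * (W ** 2).sum() # mirrors L2.getFormula: factor * (W**2).sum()
print(l1_penalty, l2_penalty) # 0.035 0.0525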
crentagon/chess-with-benefits | game/chess/show_piece_stats.py | 1 | 2090 |
def run(self, board_input, i, j):
origin_piece = board_input[i][j].piece
max_control = {
1: 2,
3: 8,
4: 13,
5: 14,
9: 27,
0: 8
}
origin_piece.status = 'Healthy'
is_threatened_undefended = len(origin_piece.attackers) > len(origin_piece.defenders)
is_threatened_by_lower_rank = [x for x in origin_piece.attackers if x < origin_piece.piece_type]
is_ample_activity = origin_piece.tiles_controlled > 0.6*max_control[origin_piece.piece_type]
offensive_power = len(origin_piece.offensive_power)
defensive_power = len(origin_piece.defensive_power)
# Threatened (being attacked by a piece without being defended OR being attacked by a piece of lower rank)
if is_threatened_by_lower_rank or is_threatened_undefended:
origin_piece.status = 'Threatened'
# Warrior (attacking at least one piece OR in a valuable position OR at 60% maximum activity)
elif offensive_power >= 2 or is_ample_activity:
origin_piece.status = 'Warrior'
# Defender (defending at least two pieces)
elif defensive_power >= 2:
origin_piece.status = 'Defender'
self.piece_stats = {
'is_piece_white': origin_piece.is_white,
'piece_type': origin_piece.piece_type,
'tile_control_count': origin_piece.tiles_controlled,
'defenders': origin_piece.defenders,
'attackers': origin_piece.attackers,
'defensive_power': origin_piece.defensive_power,
'offensive_power': origin_piece.offensive_power,
'status': origin_piece.status
}
# "Status":
# Defender/Royal Defender (defending at least two pieces/Defending the King)
# Warrior (attacking at least one piece OR in a valuable position OR at 60% maximum activity)
# Healthy (default)
# Threatened (being attacked by a piece without being defended OR being attacked by a piece of lower rank)
# Note: place its value right next to it
# Number of tiles controlled: "Tile Control Count: " // add counter at the bottom
# Number of pieces attacking it: "Attackers: "
# Number of pieces defending it: "Supporters: "
# Number of pieces it is attacking: "Offensive power: "
# Number of pieces it is defending: "Defensive power: " | gpl-3.0 | -6,851,965,051,956,522,000 | 36.339286 | 107 | 0.728708 | false |
leahrnh/ticktock_text_api | breakdown_detector.py | 1 | 1533 | import readall
import gensim
import nltk
import numpy as np
import pickle
# We need to extract some features; for now we keep it simple and just use word2vec vectors of the current turn and the previous turn.
#
model = gensim.models.Word2Vec.load('/tmp/word2vec_50_break')
all_v1 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v1')
all_v2 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v2')
all_v3 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v3')
all_logs = dict(all_v1.items() + all_v2.items() + all_v3.items())
sent_vec = None
for item in all_logs:
print item
conv = all_logs[item]["Turns"]
for turn in conv:
turn_vec_1 = sum(model[nltk.word_tokenize(conv[turn]["You"])])
if len(nltk.word_tokenize(conv[turn]["TickTock"])) ==0:
continue
#print 'TickTock'
#print conv[turn]["TickTock"]
turn_vec_2 = sum(model[nltk.word_tokenize(conv[turn]["TickTock"])])
#print turn_vec_1
#print turn_vec_2
if sent_vec is None:
sent_vec = np.hstack((turn_vec_1,turn_vec_2))
target = np.array(int(conv[turn]["Appropriateness"]))
else:
sent_vec = np.vstack((sent_vec,np.hstack((turn_vec_1,turn_vec_2))))
target = np.hstack((target,int(conv[turn]["Appropriateness"])))
sent = {'data':sent_vec,'target':target}
print sent
with open('sent.pkl','w') as f:
pickle.dump(sent,f)
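# Added note (not part of the original script): '/tmp/word2vec_50_break' is
# assumed to be a gensim word2vec model with 50-dimensional vectors trained
# elsewhere. With the older gensim API used above (model[word] indexing), such
# a model could be produced roughly like this (corpus_of_turns is hypothetical):
# sentences = [nltk.word_tokenize(t) for t in corpus_of_turns]
# model = gensim.models.Word2Vec(sentences, size=50, min_count=1)
# model.save('/tmp/word2vec_50_break')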
| gpl-2.0 | 3,179,714,820,388,494,300 | 40.432432 | 109 | 0.589693 | false |
PercyODI/PythonCSharpOOComparison | Utilities/checkLinks.py | 1 | 1091 | import sys, os, re
pattern = re.compile('\[.+\]\((?P<file>.+?)\)', re.MULTILINE) # Matches [text](directory/file.md)
folderDict = {}
numBadLinks = 0;
os.chdir("..") # Assumes this utility is one directory deep.
startDirectory = os.path.abspath(".")
mdFiles = []
for root, subFolders, files in os.walk("."):
if("\." in root):
continue
for f in files:
if ".md" in f: # Only modify MarkDown files
mdFiles.append(os.path.abspath(os.path.join(root, f)))
for mdFile in mdFiles:
os.chdir(os.path.dirname(mdFile))
fileContent = open(mdFile, 'r')
for lineNum, line in enumerate(fileContent, start=1):
matches = pattern.findall(line)
for match in matches:
if not os.path.isfile(match):
numBadLinks += 1
print("\n")
print(os.path.relpath(mdFile, startDirectory) + ", line " + str(lineNum))
print("\t" + match + " is a bad link.")
print("\n")
if numBadLinks < 1:
print("No Bad Links Found!")
else:
print("Found " + str(numBadLinks) + " bad links.")
| mit | 8,224,985,454,478,719,000 | 30.171429 | 97 | 0.583868 | false |
ActiveState/code | recipes/Python/577746_Inherit_Method_Docstrings_Using_Only/recipe-577746.py | 1 | 2640 | """docfunc module"""
from deferred_binder import DeferredBinder
class DocFunc(DeferredBinder):
TRIGGER = None
def __init__(self, f):
super().__init__(f.__name__, f)
self.f = self.target
@staticmethod
def transform(name, context, target, obj=None):
"""The DeferredBinder transform for this subclass.
name - the attribute name to which the function will be bound.
context - the class/namespace to which the function will be bound.
target - the function that will be bound.
obj - ignored.
The DeferredBinder descriptor class will replace itself with the
result of this method, when the name to which the descriptor is requested
for the first time. This can be on the class or an instances of the
class.
This way the class to which the method is bound is available so that the
inherited docstring can be identified and set.
"""
namespace, cls = context
doc = target.__doc__
if doc == DocFunc.TRIGGER:
doc = DocFunc.get_doc(cls, name, DocFunc.TRIGGER)
target.__doc__ = doc
return target
@staticmethod
def get_doc(cls, fname, default=TRIGGER, member=True):
"""Returns the function docstring the method should inherit.
cls - the class from which to start looking for the method.
fname - the method name on that class
default - the docstring to return if none is found.
member - is the target function already bound to cls?
"""
print(cls)
bases = cls.__mro__[:]
if member:
bases = bases[1:]
for base in bases:
print(base)
func = getattr(base, fname, None)
if not func:
continue
doc = getattr(func, '__doc__', default)
if doc == default:
continue
return doc
return default
@staticmethod
def inherits_docstring(f, context=None, fname=None, default=TRIGGER):
"""A decorator that returns a new DocFunc object.
f - the function to decorate.
context - the class/namespace where the function is bound, if known.
fname - the function name in that context, if known.
default - the docstring to return if none is found.
"""
if context is not None:
cls, namespace = context
fname = fname or f.__name__
f.__doc__ = DocFunc.get_doc(cls, fname, default, False)
return f
return DocFunc(f)  # DocFunc takes only the function; the deferred path falls back to DocFunc.TRIGGER
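# --- Added usage sketch (not part of the original recipe) ---
# Assuming DeferredBinder behaves as described in transform(), docstring
# inheritance would look roughly like this (class names are hypothetical):
#
#   class Base:
#       def greet(self):
#           """Say hello."""
#
#   class Child(Base):
#       @DocFunc.inherits_docstring
#       def greet(self):
#           pass # no docstring here
#
#   # Once the DeferredBinder descriptor resolves Child.greet, its __doc__
#   # should read "Say hello.", inherited from Base.greet.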
| mit | 191,561,495,246,149,060 | 31.592593 | 81 | 0.5875 | false |
google/cauliflowervest | cauliflowervest/client/base_client.py | 1 | 9496 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base CauliflowerVestClient class."""
import httplib
import json
import logging
import ssl
import time
import urllib
import urllib2
import webbrowser
import httplib2
import oauth2client.client
import oauth2client.tools
from cauliflowervest import settings as base_settings
from cauliflowervest.client import settings
from cauliflowervest.client import util
# Prefix to prevent Cross Site Script Inclusion.
JSON_PREFIX = ")]}',\n"
class Error(Exception):
"""Class for domain specific exceptions."""
class UserAbort(Error):
"""User aborted process."""
class AuthenticationError(Error):
"""There was an error with authentication."""
class RequestError(Error):
"""There was an error interacting with the server."""
class NotFoundError(RequestError):
"""No passphrase was found."""
class MetadataError(Error):
"""There was an error with machine metadata."""
class CauliflowerVestClient(object):
"""Client to interact with the CauliflowerVest service."""
ESCROW_PATH = None # String path to escrow to, set by subclasses.
# Sequence of key names of metadata to require; see GetAndValidateMetadata().
REQUIRED_METADATA = []
# The metadata key under which the passphrase is stored.
PASSPHRASE_KEY = 'passphrase'
MAX_TRIES = 5 # Number of times to try an escrow upload.
TRY_DELAY_FACTOR = 5 # Number of seconds, (* try_num), to wait between tries.
XSRF_PATH = '/xsrf-token/%s'
def __init__(self, base_url, opener, headers=None):
self._metadata = None
self.base_url = base_url
self.xsrf_url = util.JoinURL(base_url, self.XSRF_PATH)
if self.ESCROW_PATH is None:
raise ValueError('ESCROW_PATH must be set by CauliflowerVestClient subclasses.')
self.escrow_url = util.JoinURL(base_url, self.ESCROW_PATH)
self.opener = opener
self.headers = headers or {}
def _GetMetadata(self):
"""Returns a dict of key/value metadata pairs."""
raise NotImplementedError
def RetrieveSecret(self, target_id):
"""Fetches and returns the passphrase.
Args:
target_id: str, Target ID to fetch the passphrase for.
Returns:
str: passphrase.
Raises:
RequestError: there was an error downloading the passphrase.
NotFoundError: no passphrase was found for the given target_id.
"""
xsrf_token = self._FetchXsrfToken(base_settings.GET_PASSPHRASE_ACTION)
url = '%s?%s' % (util.JoinURL(self.escrow_url, urllib.quote(target_id)),
urllib.urlencode({'xsrf-token': xsrf_token}))
request = urllib2.Request(url)
try:
response = self.opener.open(request)
except urllib2.URLError as e: # Parent of urllib2.HTTPError.
if isinstance(e, urllib2.HTTPError):
e.msg += ': ' + e.read()
if e.code == httplib.NOT_FOUND:
raise NotFoundError('Failed to retrieve passphrase. %s' % e)
raise RequestError('Failed to retrieve passphrase. %s' % e)
content = response.read()
if not content.startswith(JSON_PREFIX):
raise RequestError('Expected JSON prefix missing.')
data = json.loads(content[len(JSON_PREFIX):])
return data[self.PASSPHRASE_KEY]
def GetAndValidateMetadata(self):
"""Retrieves and validates machine metadata.
Raises:
MetadataError: one or more of the REQUIRED_METADATA were not found.
"""
if not self._metadata:
self._metadata = self._GetMetadata()
for key in self.REQUIRED_METADATA:
if not self._metadata.get(key, None):
raise MetadataError('Required metadata is not found: %s' % key)
def SetOwner(self, owner):
if not self._metadata:
self.GetAndValidateMetadata()
self._metadata['owner'] = owner
def _FetchXsrfToken(self, action):
request = urllib2.Request(self.xsrf_url % action)
response = self._RetryRequest(request, 'Fetching XSRF token')
return response.read()
def _RetryRequest(self, request, description, retry_4xx=False):
"""Make the given HTTP request, retrying upon failure."""
for k, v in self.headers.iteritems():
request.add_header(k, v)
for try_num in range(self.MAX_TRIES):
try:
return self.opener.open(request)
except urllib2.URLError as e: # Parent of urllib2.HTTPError.
if isinstance(e, urllib2.HTTPError):
e.msg += ': ' + e.read()
# Reraise if HTTP 4xx and retry_4xx is False
if 400 <= e.code < 500 and not retry_4xx:
raise RequestError('%s failed: %s' % (description, e))
# Otherwise retry other HTTPError and URLError failures.
if try_num == self.MAX_TRIES - 1:
logging.exception('%s failed permanently.', description)
raise RequestError(
'%s failed permanently: %s' % (description, e))
logging.warning(
'%s failed with (%s). Retrying ...', description, e)
time.sleep((try_num + 1) * self.TRY_DELAY_FACTOR)
def IsKeyRotationNeeded(self, target_id, tag='default'):
"""Check whether a key rotation is required.
Args:
target_id: str, Target ID.
tag: str, passphrase tag.
Raises:
RequestError: there was an error getting status from server.
Returns:
bool: True if a key rotation is required.
"""
url = '%s?%s' % (
util.JoinURL(
self.base_url, '/api/v1/rekey-required/',
self.ESCROW_PATH, target_id),
urllib.urlencode({'tag': tag}))
request = urllib2.Request(url)
try:
response = self.opener.open(request)
except urllib2.URLError as e: # Parent of urllib2.HTTPError.
if isinstance(e, urllib2.HTTPError):
e.msg += ': ' + e.read()
raise RequestError('Failed to get status. %s' % e)
content = response.read()
if not content.startswith(JSON_PREFIX):
raise RequestError('Expected JSON prefix missing.')
return json.loads(content[len(JSON_PREFIX):])
def UploadPassphrase(self, target_id, passphrase, retry_4xx=False):
"""Uploads a target_id/passphrase pair with metadata.
Args:
target_id: str, Target ID.
passphrase: str, passphrase.
retry_4xx: bool, whether to retry when errors are in the 401-499 range.
Raises:
RequestError: there was an error uploading to the server.
"""
xsrf_token = self._FetchXsrfToken(base_settings.SET_PASSPHRASE_ACTION)
# Ugh, urllib2 only does GET and POST?!
class PutRequest(urllib2.Request):
def __init__(self, *args, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers']['Content-Type'] = 'application/octet-stream'
urllib2.Request.__init__(self, *args, **kwargs)
self._method = 'PUT'
def get_method(self): # pylint: disable=g-bad-name
return 'PUT'
if not self._metadata:
self.GetAndValidateMetadata()
parameters = self._metadata.copy()
parameters['xsrf-token'] = xsrf_token
parameters['volume_uuid'] = target_id
url = '%s?%s' % (self.escrow_url, urllib.urlencode(parameters))
request = PutRequest(url, data=passphrase)
self._RetryRequest(request, 'Uploading passphrase', retry_4xx=retry_4xx)
def BuildOauth2Opener(credentials):
"""Produce an OAuth compatible urllib2 OpenerDirective."""
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.verify_mode = ssl.CERT_REQUIRED
ca_certs_file = settings.ROOT_CA_CERT_CHAIN_PEM_FILE_PATH
context.load_verify_locations(ca_certs_file)
opener = urllib2.build_opener(
urllib2.HTTPSHandler(context=context),
urllib2.HTTPRedirectHandler())
h = {}
credentials.apply(h)
opener.addheaders = h.items()
return opener
def GetOauthCredentials():
"""Create an OAuth2 `Credentials` object."""
if not base_settings.OAUTH_CLIENT_ID:
raise RuntimeError('Missing OAUTH_CLIENT_ID setting!')
if not settings.OAUTH_CLIENT_SECRET:
raise RuntimeError('Missing OAUTH_CLIENT_SECRET setting!')
httpd = oauth2client.tools.ClientRedirectServer(
('localhost', 0), oauth2client.tools.ClientRedirectHandler)
httpd.timeout = 60
flow = oauth2client.client.OAuth2WebServerFlow(
client_id=base_settings.OAUTH_CLIENT_ID,
client_secret=settings.OAUTH_CLIENT_SECRET,
redirect_uri='http://%s:%s/' % httpd.server_address,
scope=base_settings.OAUTH_SCOPE,
)
authorize_url = flow.step1_get_authorize_url()
webbrowser.open(authorize_url, new=1, autoraise=True)
httpd.handle_request()
if 'error' in httpd.query_params:
raise AuthenticationError('Authentication request was rejected.')
try:
credentials = flow.step2_exchange(
httpd.query_params,
http=httplib2.Http(ca_certs=settings.ROOT_CA_CERT_CHAIN_PEM_FILE_PATH))
except oauth2client.client.FlowExchangeError as e:
raise AuthenticationError('Authentication has failed: %s' % e)
else:
logging.info('Authentication successful!')
return credentials
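# --- Added illustration (not part of the original module) ---
# CauliflowerVestClient is abstract: a concrete client must set ESCROW_PATH
# and implement _GetMetadata(). A minimal hypothetical subclass and upload
# (all names and URLs below are made up):
#
#   class ExampleClient(CauliflowerVestClient):
#       ESCROW_PATH = '/example'
#       REQUIRED_METADATA = ['hostname']
#
#       def _GetMetadata(self):
#           return {'hostname': 'host.example.com'}
#
#   opener = BuildOauth2Opener(GetOauthCredentials())
#   client = ExampleClient('https://cvest.example.com', opener)
#   client.UploadPassphrase('volume-uuid-123', 'passphrase')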
| apache-2.0 | 633,161,878,087,102,700 | 32.202797 | 86 | 0.682393 | false |
jeremiedecock/pyai | ailib/optimize/functions/unconstrained.py | 1 | 31848 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017,2018,2019 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module contains some classical test functions for unconstrained continuous
single-objective optimization.
"""
__all__ = ['sphere', 'Sphere', 'sphere1d', 'sphere2d', # TODO
'rosen', 'Rosenbrock', 'rosen2d',
'himmelblau', 'Himmelblau', 'himmelblau2d',
'rastrigin', 'Rastrigin', 'rastrigin2d',
'easom', 'Easom', 'easom2d',
'crossintray', 'Crossintray', 'crossintray2d',
'holder', 'Holder', 'holder2d']
import numpy as np
# GENERIC OBJECTIVE FUNCTION ##################################################
class _ObjectiveFunction:
"""Generic *objective function*.
TODO
"""
def __init__(self):
self._objective_function = None
self._gradient_function = None # TODO: use a generic numeric derivative function by default
self._hessian_function = None # TODO: use a generic numeric derivative function by default
self.reset_eval_counters()
self.reset_eval_logs()
self.do_eval_logs = False
self.noise = None
self.ndim = None
self.bounds = None
self.continuous = None
self.translation_vector = np.zeros(shape=self.ndim)
self.function_name = None
self.function_formula = None
self.arg_min = None
@property
def stochastic(self):
return self.noise is not None
@property
def unimodal(self):
raise NotImplementedError
def reset_eval_counters(self):
# TODO: make an external Log (or Counter) class
self.num_eval = 0
self.num_gradient_eval = 0
self.num_hessian_eval = 0
def reset_eval_logs(self):
# TODO: make an external Log class
self.eval_logs_dict = {'x': [], 'fx': []} # TODO
def __call__(self, x):
"""Evaluate one or several points.
This function is a wrapper that does several boring task aside the
evaluation of `func`: check arguments, log results, ...
Parameters
----------
func : callable object
The function used to evaluate `x`.
y : ndarray
The 1D or 2D numpy array containing the points to evaluate.
If `x` is a 2D array, the coordinates of each points are
distributed along *the first dimension*.
For instance, to evaluate the three 2D points (0,0), (1,1) and
(2,2), `x` have to be coded as the following:
`x = np.array([[0, 1, 2], [0, 1, 2]])`
so that the first point is given by `x[:,0]`, the second point by
`x[:,1]`, ... (this makes functions definition much simpler).
Returns
-------
float or ndarray
The results of the evaluation: a scalar if only one point has been
evaluated or a 1D numpy array if several points have been
evaluated.
"""
# Check self._objective_function ########
assert self._objective_function is not None
assert callable(self._objective_function)
# Check x shape #########################
if x.ndim > 0:
if x.shape[0] != self.ndim:
raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
# Update the evaluations counter ########
# TODO: make an external Log (or Counter) class
if (x.ndim == 0) or (x.ndim == 1):
self.num_eval += 1
elif x.ndim == 2:
self.num_eval += x.shape[1]
else:
raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
# Apply translation #####################
x_translated = (x.T - self.translation_vector).T
# Eval x ################################
y = self._objective_function(x_translated)
# Apply noise ###########################
if self.noise is not None:
y = self.noise(x, y)
# Update the evals log ##################
# TODO: make an external Log class
if self.do_eval_logs:
if y.ndim == 0:
self.eval_logs_dict['x'].append(x) # TODO
elif y.ndim == 1:
self.eval_logs_dict['x'].extend(x.T) # TODO
else:
raise Exception("Wrong output dimension.")
if y.ndim == 0:
self.eval_logs_dict['fx'].append(y) # TODO
elif y.ndim == 1:
self.eval_logs_dict['fx'].extend(y) # TODO
else:
raise Exception("Wrong output dimension.")
return y
def gradient(self, x):
"""
The derivative (i.e. gradient) of the objective function.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the derivative is to be computed
or a two dimension Numpy array of points at which the derivatives are to be computed.
Returns
-------
float or array_like
gradient of the objective function at `x`.
"""
# Check self._gradient_function #########
assert self._gradient_function is not None
assert callable(self._gradient_function)
# Check x shape #########################
if x.shape[0] != self.ndim:
raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
# Update the evaluations counter ########
# TODO: make an external Log (or Counter) class
if x.ndim == 1:
self.num_gradient_eval += 1
elif x.ndim == 2:
self.num_gradient_eval += x.shape[1]
else:
raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
# Apply translation #####################
x_translated = (x.T - self.translation_vector).T
# Eval x ################################
grad = self._gradient_function(x_translated)
return grad
def hessian(self, x):
"""
The Hessian matrix of the objective function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the objective function at `x`.
"""
# Check self._gradient_function #########
assert self._hessian_function is not None
assert callable(self._hessian_function)
# Check x shape #########################
if x.shape[0] != self.ndim:
raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
# Update the evaluations counter ########
# TODO: make an external Log (or Counter) class
if x.ndim == 1:
self.num_hessian_eval += 1
elif x.ndim == 2:
self.num_hessian_eval += x.shape[1]
else:
raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
# Apply translation #####################
x_translated = (x.T - self.translation_vector).T
# Eval x ################################
hess = self._hessian_function(x_translated)
return hess
def __str__(self):
name = r""
if self.stochastic is not None:
name += "stochastic "
if self.function_name is not None:
name += self.function_name
else:
name += self.__class__.__name__
if self.function_formula is not None:
name += ": " + self.function_formula
return name
# SPHERE FUNCTION #############################################################
def sphere(x):
r"""The Sphere function.
The Sphere function is a famous **convex** function used to test the performance of optimization algorithms.
This function is very easy to optimize and can be used as a first test to check an optimization algorithm.
.. math::
f(\boldsymbol{x}) = \sum_{i=1}^{n} x_{i}^2
Global minimum:
.. math::
f(\boldsymbol{0}) = 0
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^n
.. image:: sphere_3d.png
.. image:: sphere.png
Example
-------
To evaluate the single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> sphere( np.array([0, 0]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate the single 3D point :math:`x = \begin{pmatrix} 1 \\ 1 \\ 1 \end{pmatrix}`:
>>> sphere( np.array([1, 1, 1]) )
3.0
The result should be :math:`f(x) = 3.0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> sphere( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([0., 2., 8.])
The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 2` and :math:`f(x_3) = 8`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Sphere function is to be computed
or a two dimension Numpy array of points at which the Sphere function is to be computed.
Returns
-------
float or array_like
The value(s) of the Sphere function for the given point(s) `x`.
See Also
--------
sphere_gradient, sphere_hessian
"""
# Remark: `sum(x**2.0)` is equivalent to `np.sum(x**2.0, axis=0)` but only the latter works if x is a scalar (e.g. x = np.float(3)).
return np.sum(x**2.0, axis=0)
def sphere_gradient(x):
"""
The derivative (i.e. gradient) of the Sphere function.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the derivative is to be computed
or a two dimension Numpy array of points at which the derivatives are to be computed.
Returns
-------
float or array_like
gradient of the Sphere function at `x`.
See Also
--------
sphere, sphere_hessian
"""
return 2.0 * x
def sphere_hessian(x):
"""
The Hessian matrix of the Sphere function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Sphere function at `x`.
See Also
--------
sphere, sphere_gradient
"""
return 2.0 * np.ones(x.shape)
class Sphere(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = sphere
self._gradient_function = sphere_gradient
self._hessian_function = sphere_hessian
self.ndim = ndim
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10.
self.bounds[1,:] = 10.
self.continuous = True
self.arg_min = np.zeros(self.ndim)
@property
def unimodal(self):
return True
sphere1d = Sphere(ndim=1)
sphere2d = Sphere(ndim=2)
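# Added usage sketch (not part of the original module): the wrapper above
# counts evaluations and accepts either a single point or a batch of
# column-stacked points, e.g.
# sphere2d(np.array([3.0, 4.0])) # -> 25.0
# sphere2d(np.array([[0., 1., 2.], [0., 1., 2.]])) # -> array([0., 2., 8.])
# sphere2d.gradient(np.array([3.0, 4.0])) # -> array([6., 8.])
# sphere2d.num_eval # -> 4 (one single point plus three batched points)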
# ROSENBROCK FUNCTION #########################################################
def rosen(x):
r"""The (extended) Rosenbrock function.
The Rosenbrock function is a famous **non-convex** function used to test
the performance of optimization algorithms. The classical two-dimensional
version of this function is **unimodal** but its *extended* :math:`n`-dimensional
version (with :math:`n \geq 4`) is **multimodal** [SHANG06]_.
.. math::
f(\boldsymbol{x}) = \sum_{i=1}^{n-1} \left[100 \left( x_{i+1} - x_{i}^{2} \right)^{2} + \left( x_{i} - 1 \right)^2 \right]
Global minimum:
.. math::
\min =
\begin{cases}
n = 2 & \rightarrow \quad f(1,1) = 0, \\
n = 3 & \rightarrow \quad f(1,1,1) = 0, \\
n > 3 & \rightarrow \quad f(\underbrace{1,\dots,1}_{n{\text{ times}}}) = 0 \\
\end{cases}
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^n
The Rosenbrock has exactly one (global) minimum :math:`(\underbrace{1, \dots,
1}_{n{\text{ times}}})^\top` for :math:`n \leq 3` and an additional *local*
minimum for :math:`n \geq 4` near :math:`(-1, 1, 1, \dots, 1)^\top`.
See http://www.mitpressjournals.org/doi/abs/10.1162/evco.2006.14.1.119
(freely available at http://dl.acm.org/citation.cfm?id=1118014) and
https://en.wikipedia.org/wiki/Rosenbrock_function#Multidimensional_generalisations
for more information.
See https://en.wikipedia.org/wiki/Rosenbrock_function and
http://mathworld.wolfram.com/RosenbrockFunction.html for more information.
The Rosenbrock function, its derivative (i.e. gradient) and its hessian matrix are also implemented in Scipy
([scipy.optimize.rosen](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen.html#scipy.optimize.rosen),
[scipy.optimize.rosen_der](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_der.html#scipy.optimize.rosen_der),
[scipy.optimize.rosen_hess](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_hess.html#scipy.optimize.rosen_hess) and
[scipy.optimize.rosen_hess_prod](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_hess_prod.html#scipy.optimize.rosen_hess_prod)).
See [Scipy documentation](https://docs.scipy.org/doc/scipy/reference/optimize.html#rosenbrock-function) for more information.
.. image:: rosenbrock_3d.png
.. image:: rosenbrock.png
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Rosenbrock function is to be computed
or a two dimension Numpy array of points at which the Rosenbrock function is to be computed.
Returns
-------
float or array_like
The value(s) of the Rosenbrock function for the given point(s) `x`.
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> rosen( np.array([0, 0]) )
1.0
The result should be :math:`f(x) = 1`.
Example
-------
To evaluate a single 3D point :math:`x = \begin{pmatrix} 1 \\ 1 \\ 1 \end{pmatrix}`:
>>> rosen( np.array([1, 1, 1]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> rosen( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([ 1., 0., 401.])
The result should be :math:`f(x_1) = 1`, :math:`f(x_2) = 0` and :math:`f(x_3) = 401`.
References
----------
.. [SHANG06] `Shang, Y. W., & Qiu, Y. H. (2006). A note on the extended Rosenbrock function. Evolutionary Computation, 14(1), 119-126. <http://www.mitpressjournals.org/doi/abs/10.1162/evco.2006.14.1.119>`_
"""
return np.sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0)
class Rosenbrock(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = rosen
self.ndim = ndim
if self.ndim < 2: # TODO
raise ValueError("The rosenbrock function is defined for solution spaces having at least 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
self.arg_min = np.ones(self.ndim)
@property
def unimodal(self):
return True if self.ndim < 4 else False
rosen2d = Rosenbrock(ndim=2)
# HIMMELBLAU'S FUNCTION #######################################################
def himmelblau(x):
r"""The Himmelblau's function.
The Himmelblau's function is a two-dimensional **multimodal** function.
.. math::
f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2
The function has four global minima:
.. math::
\begin{eqnarray}
f(3, 2) = 0 \\
f(-2.805118, 3.131312) = 0 \\
f(-3.779310, -3.283186) = 0 \\
f(3.584428, -1.848126) = 0
\end{eqnarray}
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^2
It also has one local maximum at :math:`f(-0.270845, -0.923039) = 181.617`.
The locations of all the minima can be found analytically (roots of cubic
polynomials) but expressions are somewhat complicated.
The function is named after David Mautner Himmelblau, who introduced it in
*Applied Nonlinear Programming* (1972), McGraw-Hill, ISBN 0-07-028921-2.
See https://en.wikipedia.org/wiki/Himmelblau%27s_function for more information.
.. image:: himmelblau_3d.png
.. image:: himmelblau.png
Example
-------
To evaluate a single point :math:`x = \begin{pmatrix} 3 \\ 2 \end{pmatrix}`:
>>> himmelblau( np.array([3, 2]) )
0.0
The result should be :math:`f(x) = 1`.
Example
-------
To evaluate multiple points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> himmelblau( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([170., 106., 26.])
The result should be :math:`f(x_1) = 170`, :math:`f(x_2) = 106` and :math:`f(x_3) = 26`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Himmelblau's function is to be computed
or a two dimension Numpy array of points at which the Himmelblau's function is to be computed.
Returns
-------
float or array_like
The value(s) of the Himmelblau's function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return (x[0]**2.0 + x[1] - 11.0)**2.0 + (x[0] + x[1]**2.0 - 7.0)**2.0
class Himmelblau(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = himmelblau
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The himmelblau function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
        self.arg_min = np.array([3.0, 2.0])  # one of the four global minima listed above
@property
def unimodal(self):
return False
himmelblau2d = Himmelblau(ndim=2)
# RASTRIGIN FUNCTION ##########################################################
def rastrigin(x):
r"""The Rastrigin function.
The Rastrigin function is a famous **multimodal** function.
Finding the minimum of this function is a fairly difficult problem due to
its large search space and its large number of local minima.
The classical two-dimensional version of this function has been introduced
by L. A. Rastrigin in *Systems of extremal control* Mir, Moscow (1974).
Its *generalized* :math:`n`-dimensional version has been proposed by H.
Mühlenbein, D. Schomisch and J. Born in *The Parallel Genetic Algorithm as
Function Optimizer* Parallel Computing, 17, pages 619–632, 1991.
On an n-dimensional domain it is defined by:
.. math::
f(\boldsymbol{x}) = An + \sum_{i=1}^{n} \left[ x_{i}^{2} - A \cos(2 \pi x_{i}) \right]
where :math:`A = 10`.
Global minimum:
.. math::
f(\boldsymbol{0}) = 0
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^n
See https://en.wikipedia.org/wiki/Rastrigin_function for more information.
.. image:: rastrigin_3d.png
.. image:: rastrigin.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> rastrigin( np.array([0, 0]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate a single 3D point :math:`x = \begin{pmatrix} 1 \\ 1 \\ 1 \end{pmatrix}`:
>>> rastrigin( np.array([0, 0, 0]) )
0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:
>>> rastrigin( np.array([[0, 1, 2], [0, 1, 2]]) )
... # doctest: +NORMALIZE_WHITESPACE
    array([ 0., 2., 8.])
    The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 2` and :math:`f(x_3) = 8`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Rastrigin function is to be computed
or a two dimension Numpy array of points at which the Rastrigin function is to be computed.
Returns
-------
float or array_like
The value(s) of the Rastrigin function for the given point(s) `x`.
"""
A = 10.
n = x.shape[0]
return A * n + np.sum(x**2.0 - A * np.cos(2.0 * np.pi * x), axis=0)
class Rastrigin(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = rastrigin
self.ndim = ndim
if self.ndim < 2: # TODO
raise ValueError("The rastrigin function is defined for solution spaces having at least 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
        self.arg_min = np.zeros(self.ndim)  # global minimum at the origin
@property
def unimodal(self):
return False
rastrigin2d = Rastrigin(ndim=2)
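# Illustrative sketch (added for clarity, not part of the original module): because
# `rastrigin` accepts a (ndim, n_points) array, a whole grid can be evaluated in one
# call, which makes the many local minima easy to visualise (e.g. with plt.contourf).
def _rastrigin_grid_demo(n=200):
    xs = np.linspace(-5.12, 5.12, n)                      # classic Rastrigin search box
    grid_x, grid_y = np.meshgrid(xs, xs)
    points = np.array([grid_x.ravel(), grid_y.ravel()])   # shape (2, n*n)
    values = rastrigin(points).reshape(grid_x.shape)
    return grid_x, grid_y, values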
# EASOM FUNCTION ##############################################################
def easom(x):
r"""The Easom function.
The Easom function is a 2 dimensions **unimodal** function.
.. math::
f(x_1, x_2) = -\cos(x_1) \cos(x_2) \exp \left( -\left[ (x_1-\pi)^2 + (x_2-\pi)^2 \right] \right)
Global minimum:
.. math::
f(\pi, \pi) = -1
Search domain:
.. math::
\boldsymbol{x} \in \mathbb{R}^2
See https://www.sfu.ca/~ssurjano/easom.html for more information.
.. image:: easom_3d.png
.. image:: easom.png
Example
-------
    To evaluate a single 2D point :math:`x = \begin{pmatrix} \pi \\ \pi \end{pmatrix}`:
>>> easom( np.array([np.pi, np.pi]) )
-1.0
The result should be :math:`f(x) = -1`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} \pi \\ \pi \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` at once:
>>> easom( np.array([[np.pi, 0, 1], [np.pi, 0, 1]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([-1., -2.67528799e-09, -3.03082341e-05])
The result should be :math:`f(x_1) = -1`, :math:`f(x_2) \approx 0` and :math:`f(x_3) \approx 0`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Easom function is to be computed
or a two dimension Numpy array of points at which the Easom function is to be computed.
Returns
-------
float or array_like
The value(s) of the Easom function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return -np.cos(x[0]) * np.cos(x[1]) * np.exp(-((x[0]-np.pi)**2.0 + (x[1]-np.pi)**2.0))
class Easom(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = easom
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The easom function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10. # TODO
self.bounds[1,:] = 10. # TODO
self.continuous = True
        self.arg_min = np.full(self.ndim, np.pi)  # global minimum at (pi, pi)
@property
def unimodal(self):
return True
easom2d = Easom(ndim=2)
# CROSS-IN-TRAY FUNCTION ######################################################
def crossintray(x):
r"""The Cross-in-tray function.
The Cross-in-tray function is a 2 dimensions **multimodal** function, with
four global minima.
.. math::
f(x_1, x_2) = -0.0001 \left( \left| \sin(x_1) \sin(x_2) \exp \left( \left| 100 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi} \right| \right)\right| + 1 \right)^{0.1}
Global minima:
.. math::
\text{Min} =
\begin{cases}
f(1.34941, -1.34941) &= -2.06261 \\
f(1.34941, 1.34941) &= -2.06261 \\
f(-1.34941, 1.34941) &= -2.06261 \\
f(-1.34941, -1.34941) &= -2.06261 \\
\end{cases}
Search domain:
.. math::
-10 \leq x_1, x_2 \leq 10
**References**: *Test functions for optimization* (Wikipedia):
https://en.wikipedia.org/wiki/Test_functions_for_optimization.
.. image:: cross_in_tray_3d.png
.. image:: cross_in_tray.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> crossintray( np.array([0, 0]) )
-0.0001
The result should be :math:`f(x) = -0.0001`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 1.34941 \\ 1.34941 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} -1.34941 \\ -1.34941 \end{pmatrix}` at once:
>>> crossintray( np.array([[0, 1.34941, -1.34941], [0, 1.34941, -1.34941]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([ -0.0001, -2.06261, -2.06261])
The result should be :math:`f(x_1) = -0.0001`, :math:`f(x_2) = -2.06261` and :math:`f(x_3) = -2.06261`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Cross-in-tray function is to be computed
or a two dimension Numpy array of points at which the Cross-in-tray function is to be computed.
Returns
-------
float or array_like
The value(s) of the Cross-in-tray function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return -0.0001 * (np.abs(np.sin(x[0]) * np.sin(x[1]) * np.exp( np.abs( 100.0 - np.sqrt(x[0]**2.0 + x[1]**2.0)/np.pi ))) + 1.0)**0.1
class Crossintray(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = crossintray
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The crossintray function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10.
self.bounds[1,:] = 10.
self.continuous = True
        self.arg_min = np.array([1.34941, 1.34941])  # one of the four global minima listed above
@property
def unimodal(self):
return False
crossintray2d = Crossintray(ndim=2)
# HÖLDER TABLE FUNCTION #######################################################
def holder(x):
r"""The Hölder table function.
The Hölder table function is a 2 dimensions **multimodal** function, with
four global minima.
.. math::
f(x_1, x_2) =
-\left| \sin(x_1) \cos(x_2) \exp \left( \left| 1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi} \right| \right) \right|
Global minima:
.. math::
\text{Min} =
\begin{cases}
f(8.05502, 9.66459) &= -19.2085 \\
f(-8.05502, 9.66459) &= -19.2085 \\
f(8.05502, -9.66459) &= -19.2085 \\
f(-8.05502, -9.66459) &= -19.2085
\end{cases}
Search domain:
.. math::
-10 \leq x_1, x_2 \leq 10
**References**: *Test functions for optimization* (Wikipedia):
https://en.wikipedia.org/wiki/Test_functions_for_optimization.
.. image:: holder_3d.png
.. image:: holder.png
Example
-------
To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:
>>> holder( np.array([0, 0]) )
    -0.0
The result should be :math:`f(x) = 0`.
Example
-------
To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
:math:`x_2 = \begin{pmatrix} 0 \\ 1 \end{pmatrix}` and
:math:`x_3 = \begin{pmatrix} 1 \\ 0 \end{pmatrix}` at once:
>>> holder( np.array([[0., 0., 1.], [0., 1., 0.]]) )
... # doctest: +NORMALIZE_WHITESPACE
array([-0. , -0. , -1.66377043])
The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 0` and :math:`f(x_3) = -1.66377043`.
Parameters
----------
x : array_like
One dimension Numpy array of the point at which the Hölder table function is to be computed
or a two dimension Numpy array of points at which the Hölder table function is to be computed.
Returns
-------
float or array_like
The value(s) of the Hölder table function for the given point(s) `x`.
"""
assert x.shape[0] == 2, x.shape
return -np.abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(np.abs(1.0 - np.sqrt(x[0]**2.0 + x[1]**2.0)/np.pi )))
class Holder(_ObjectiveFunction):
"""
TODO
"""
def __init__(self, ndim):
super().__init__()
self._objective_function = holder
self.ndim = ndim
if self.ndim != 2:
raise ValueError("The holder function is defined for solution spaces having 2 dimensions.")
self.bounds = np.ones((2, self.ndim)) # TODO: take this or the transpose of this ?
self.bounds[0,:] = -10.
self.bounds[1,:] = 10.
self.continuous = True
        self.arg_min = np.array([8.05502, 9.66459])  # one of the four global minima listed above
@property
def unimodal(self):
return False
holder2d = Holder(ndim=2)
| mit | -930,977,634,268,075,000 | 28.453284 | 209 | 0.565847 | false |
gavein/sleeping-god | SleepingGodObjects/Vessel.py | 1 | 2849 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
from Constants import WEAR_AT_TURN, OXYGEN_AT_TURN, CARGO_WATER, CARGO_MINERALS
from SleepingGodObjects.GameObjects import GameObject
class Vessel(GameObject):
def __init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks,
cargo={},
oxygen=0,
hull=0,
wear_resistance=0):
GameObject.__init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks)
self.cargo = cargo
self.cargo_keys = [
CARGO_WATER,
CARGO_MINERALS
]
for key in self.cargo_keys:
if not self.cargo.has_key(key):
self.cargo[key] = 0
self.oxygen = oxygen
self.oxygen_max = oxygen
self.hull = hull
self.wear = hull
self.wear_resistance = wear_resistance
def move(self, dx, dy):
self.pos_x += dx
self.pos_y += dy
turn_wear = WEAR_AT_TURN - self.wear_resistance
self.wear -= turn_wear
self.oxygen -= OXYGEN_AT_TURN
def cargo_info(self, key):
if self.cargo.has_key(key):
return self.cargo[key]
class PlayerVessel(Vessel):
SOLAR_SAIL = u"фотонный парус"
def __init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks,
cargo={},
oxygen=0,
hull=0,
wear_resistance=0,
propulsion=SOLAR_SAIL):
Vessel.__init__(
self,
pos_x,
pos_y,
char,
label,
color,
blocks,
cargo,
oxygen,
hull,
wear_resistance)
self.propulsion = propulsion
self.abilities = []
def increase_resources(self, minerals, water):
self.cargo[CARGO_MINERALS] += minerals
self.cargo[CARGO_WATER] += water
def add_ability(self, ability):
self.abilities.append(ability)
    def get_ability_name(self, ability):
return ability.name
def get_ability_description(self, ability):
return ability.description
def use_ability(self, ability, *args):
if ability in self.abilities:
ability.use(args)
| gpl-3.0 | -8,537,353,593,604,741,000 | 25.259259 | 79 | 0.420663 | false |
ENCODE-DCC/dxencode | scrub.py | 1 | 34431 | #!/usr/bin/env python2.7
# scrub.py 1.0.0
#
# Scrub.py will remove all files for an experiment [replicate] and genome/annotation
#
# 1) Lookup experiment type from encoded, based on accession
# 2) Locate the experiment accession named folder
# 3) Given the experiment type, determine the expected results
# 4) Given expected results locate any files (by glob) that should be removed
# a) each single replicate (in replicate sub-folders named as reN_N/
# b) combined replicates in the experiment folder itself
# 5) For each file that should be removed, determine if the file has already been posted
# 6) For each file that needs to be removed and has already been posted, remove
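#
# Example invocation (the accession and project names below are hypothetical):
#   ./scrub.py -e ENCSR000AAA --project my_scratch -g GRCh38 --server test --test
# With --test the script only reports which posted files it would remove; without it,
# files whose dx 'accession' property shows they were posted are actually deleted.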
import argparse,os, sys
import json, urlparse, subprocess, itertools, logging, time
from datetime import datetime
from base64 import b64encode
import commands
import dxpy
import dx
import encd
class Scrub(object):
'''
Scrub module removes posted experiment files from dnanexus.
'''
TOOL_IS = 'scrub'
HELP_BANNER = "Scrubs posted files from DX. "
''' This help banner is displayed by get_args.'''
SERVER_DEFAULT = 'www'
'''This the server to makes files have been posed to.'''
FOLDER_DEFAULT = "/"
'''Where to start the search for experiment folders.'''
EXPERIMENT_TYPES_SUPPORTED = [ 'long-rna-seq', 'small-rna-seq', 'rampage', 'dna-me', 'dnase-seq' ]
'''This module supports only these experiment (pipeline) types.'''
SKIP_VALIDATE = {"transcription start sites":'bed'}
'''Some output_types cannot currently be validated, theoretically'''
# Pipeline specifications include order of steps, steps per replicate, combined steps and
# within steps, the output_type: file_glob that define expected results.
# Note: that some steps have multiple files with the same output_type (e.g. hotspot: bed & bb).
# When this happens, key on "output_type|format|format_type": file_glob
# (e.g. "hotspot|bed|narrowPeak": "*_hotspot.bed" and "hotspot|bb|narrowPeak": "*_hotspot.bb")
# TODO: This could be done more gracefully.
PIPELINE_SPECS = {
"long-rna-seq": {
"step-order": [ "align-tophat","signals-top-se","signals-top-pe",
"align-star","signals-star-se","signals-star-pe","quant-rsem","mad-qc"],
"replicate": {
"align-tophat": { "alignments": "*_tophat.bam" ,
"QC_only": "*_flagstat.txt" },
"signals-top-se": { "signal of all reads": "*_tophat_all.bw",
"signal of unique reads": "*_tophat_uniq.bw" },
"signals-top-pe": { "minus strand signal of all reads": "*_tophat_minusAll.bw",
"plus strand signal of all reads": "*_tophat_plusAll.bw",
"minus strand signal of unique reads": "*_tophat_minusUniq.bw",
"plus strand signal of unique reads": "*_tophat_plusUniq.bw" },
"signals-star-se": { "signal of all reads": "*_star_genome_all.bw",
"signal of unique reads": "*_star_genome_uniq.bw" },
"signals-star-pe": { "minus strand signal of all reads": "*_star_genome_minusAll.bw",
"plus strand signal of all reads": "*_star_genome_plusAll.bw",
"minus strand signal of unique reads": "*_star_genome_minusUniq.bw",
"plus strand signal of unique reads": "*_star_genome_plusUniq.bw" },
"align-star": { "alignments": "*_star_genome.bam",
"transcriptome alignments": "*_star_anno.bam",
"QC_only": "*_star_Log.final.out" },
"quant-rsem": { "gene quantifications": "*_rsem.genes.results",
"transcript quantifications": "*_rsem.isoforms.results" } },
"combined": {
"mad-qc": { "QC_only": "*_mad_plot.png" } },
},
"small-rna-seq": {
"step-order": [ "align","signals","mad_qc"],
"replicate": {
"align": { "alignments": "*_srna_star.bam",
"gene quantifications": "*_srna_star_quant.tsv" },
"signals": { "plus strand signal of all reads": "*_srna_star_plusAll.bw",
"minus strand signal of all reads": "*_srna_star_minusAll.bw",
"plus strand signal of unique reads": "*_srna_star_plusUniq.bw",
"minus strand signal of unique reads": "*_srna_star_minusUniq.bw" } },
"combined": {
"mad_qc": { "QC_only": "*_mad_plot.png" } },
},
"rampage": {
"step-order": [ "align","signals","peaks","idr","mad_qc"],
"replicate": {
"align": { "alignments": "*_star_marked.bam",
"QC_only": "*_flagstat.txt" },
"signals": { "plus strand signal of all reads": "*_5p_plusAll.bw",
"minus strand signal of all reads": "*_5p_minusAll.bw",
"plus strand signal of unique reads": "*_5p_plusUniq.bw",
"minus strand signal of unique reads": "*_5p_minusUniq.bw" },
"peaks": { "transcription start sites|gff|gff3": "*_peaks.gff.gz",
"transcription start sites|bed|tss_peak": "*_peaks.bed.gz",
"transcription start sites|bigBed|tss_peak": "*_peaks.bb",
"gene quantifications": "*_peaks_quant.tsv" } },
"combined": {
"idr": { "transcription start sites|bed|idr_peak": "*_idr.bed.gz",
"transcription start sites|bigBed|idr_peak": "*_idr.bb" },
"mad_qc": { "QC_only": "*_mad_plot.png" } },
},
"dna-me": {
"step-order": [ "align","quantification","corr"], # How to: 1) combine 3 steps into 1; 2) tech lvl, bio lvl, exp lvl
"replicate": {
"align": { "alignments": [ "*_techrep_bismark_pe.bam", "*_bismark.bam" ] }, # *may* have samtools_flagstat, samtools_stats, Don't wan't bismark_map
"quantification": { "methylation state at CpG|bigBed|bedMethyl": "*_bismark_biorep_CpG.bb", # All have: samtools_flagstat, bismark_map
"methylation state at CpG|bed|bedMethyl": "*_bismark_biorep_CpG.bed.gz", # All have: samtools_flagstat, bismark_map
"methylation state at CHG|bigBed|bedMethyl": "*_bismark_biorep_CHG.bb", # All have: samtools_flagstat, bismark_map
"methylation state at CHG|bed|bedMethyl": "*_bismark_biorep_CHG.bed.gz", # All have: samtools_flagstat, bismark_map
"methylation state at CHH|bigBed|bedMethyl": "*_bismark_biorep_CHH.bb", # All have: samtools_flagstat, bismark_map
"methylation state at CHH|bed|bedMethyl": "*_bismark_biorep_CHH.bed.gz", # All have: samtools_flagstat, bismark_map
"signal": "*_bismark_biorep.bw" } }, # All have: samtools_flagstat, bismark_map
"combined": {
"corr": { "QC_only": "*_CpG_corr.txt" } }, # Not yet defined in encodeD
},
"dnase-seq": {
"step-order": [ "dnase-align-bwa","dnase-filter","dnase-call-hotspots"],
"replicate": {
"dnase-align-bwa": { "unfiltered alignments": "*_bwa_techrep.bam" },
"dnase-filter": { "alignments": "*_bwa_biorep_filtered.bam" },
"dnase-call-hotspots": { "hotspots|bed|broadPeak": "*_hotspots.bed.gz",
"hotspots|bigBed|broadPeak": "*_hotspots.bb",
"peaks|bed|narrowPeak": "*_peaks.bed.gz",
"peaks|bigBed|narrowPeak": "*_peaks.bb",
"signal of unique reads": "*_density.bw" } },
"combined": {
},
},
}
# Step children are steps that should be combined with their parent step rather than be treated as a separate job
STEP_CHILDREN = {
"dme-cx-to-bed": "dme-extract-pe",
"dme-cx-to-bed-alt": "dme-extract-se",
"dme-bg-to-signal": "dme-extract-pe",
"dme-bg-to-signal-alt": "dme-extract-se",
}
ASSEMBLIES_SUPPORTED = { "hg19": "hg19", "GRCh38": "GRCh38", "mm10": "mm10" }
'''This module supports only these assemblies.'''
ANNOTATIONS_SUPPORTED = [ 'V24', 'V19', 'M2', 'M3', 'M4' ]
'''This module supports only these annotations.'''
REQUIRE_ANNOTATION = [ 'long-rna-seq','small-rna-seq','rampage' ]
'''These assays require an annotation.'''
FORMATS_SUPPORTED = ["bam","bed","bigBed","bigWig","fasta","fastq","gff","gtf","hdf5","idat","rcc","CEL",
"tsv","csv","sam","tar","wig","txt"]
EXTENSION_TO_FORMAT = {
"2bit": "2bit",
"cel.gz": "CEL",
"bam": "bam",
"bed.gz": "bed", "bed": "bed",
"bigBed": "bigBed", "bb": "bigBed",
"bigWig": "bigWig", "bw": "bigWig",
"csfasta.gz": "csfasta",
"csqual.gz": "csqual",
"fasta.gz": "fasta", "fa.gz": "fasta", "fa": "fasta",
"fastq.gz": "fastq", "fq.gz": "fastq", "fq": "fastq",
"gff.gz": "gff", "gff": "gff",
"gtf.gz": "gtf", "gtf": "gtf",
"h5": "hdf5",
"idat": "idat",
"rcc": "rcc",
"tar.gz": "tar", "tgz": "tar",
"tsv": "tsv", "results": "tsv",
"csv": "csv",
"wig.gz": "wig", "wig": "wig",
"sam.gz": "sam", "sam": "sam"
}
'''List of supported formats, and means of recognizing with file extensions.'''
PRIMARY_INPUT_EXTENSION = [ "fastq","fq"]
'''List of file extensions used to recognize primary inputs to parse accessions.'''
def __init__(self):
'''
Scrub expects one or more experiment ids as arguments and will find files that
should be removed from the associated directory.
'''
self.args = {} # run time arguments
self.server_key = 'www' # TODO: replace with self.encd.server_key when Encd class is created
self.server = None # TODO: replace with self.encd.server() when Encd class is created
self.acc_prefix = "TSTFF"
self.proj_name = None
self.project = None
self.proj_id = None
self.exp = {} # Will hold the encoded exp json
self.exp_id = None
self.exp_type = {} # Will hold the experiment's assay_type, normalized to known tokens.
self.genome = None # genome should be required
self.annotation = None # if appropriate (mice), points the way to the sub-dir
self.pipeline = None # pipeline definitions (filled in when experiment type is known)
        self.replicates = None       # list of replicate folders currently found beneath the experiment folder
self.fastqs_too = False
self.test = True # assume Test until told otherwise
self.force = False # remove files whether posted or not
self.remove_all = False # Removes experiment dir and all files beneath it recursively (Requires force!)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
encd.logger = logging.getLogger(__name__ + '.dxe') # I need this to avoid some errors
encd.logger.addHandler(logging.StreamHandler()) #logging.NullHandler)
print
def get_args(self,parse=True):
'''Parse the input arguments.'''
### PIPELINE SPECIFIC
ap = argparse.ArgumentParser(description=self.HELP_BANNER + "All results " +
"are expected to be in folder /<resultsLoc>/<experiment> and any replicate " +
"sub-folders named as " +
"<experiment>/rep<biological-replicate>_<technical-replicate>.")
### PIPELINE SPECIFIC
ap.add_argument('-e', '--experiments',
help='One or more ENCODED experiment accessions, or file containing list',
nargs='+',
required=True)
ap.add_argument('--project',
help="Project to run analysis in (default: '" + \
dx.env_get_current_project() + "')",
required=False)
ap.add_argument('-f','--folder',
help="The location to search for experiment folders (default: " + \
"'<project>:" + self.FOLDER_DEFAULT + "')",
default=self.FOLDER_DEFAULT,
required=False)
ap.add_argument('--server',
help="Server that files should have been posted to (default: '" + self.SERVER_DEFAULT + "')",
default=self.SERVER_DEFAULT,
required=False)
ap.add_argument('-g','--genome',
help="The genome assembly that files were aligned to (default: discovered if possible)",
default=None,
required=True)
ap.add_argument('--fastqs_too',
help='Remove fastqs too.',
action='store_true',
required=False)
ap.add_argument('--test',
help='Test run only, do not launch anything.',
action='store_true',
required=False)
ap.add_argument('--start_at',
help="Start processing with this file name (or possibly accession).",
default=None,
required=False)
ap.add_argument('--files',
help="Just delete this number of files (default: all)",
type=int,
default=0,
required=False)
ap.add_argument('--remove_all',
help='Remove all files and directory (default is to leave fastqs and workflows) Requires force!',
action='store_true',
required=False)
ap.add_argument('--force',
help='Remove files regardless of whether they have been posted or not.',
action='store_true',
required=False)
ap.add_argument('--verbose',
help='More debugging output.',
action='store_true',
required=False)
if parse:
return ap.parse_args()
else:
return ap
def pipeline_specification(self,args,exp_type,exp_folder,verbose=False):
'''Sets the pipeline specification object for this experiment type.'''
# Start with dict containing common variables
#self.expected = copy.deepcopy(self.PIPELINE_SPECS[exp_type])
pipeline_specs = self.PIPELINE_SPECS.get(exp_type)
self.annotation = None # TODO: if appropriate, need way to determine annotation
if verbose:
print >> sys.stderr, "Pipeline specification:"
print >> sys.stderr, json.dumps(pipeline_specs,indent=4)
return pipeline_specs
def strip_comments(self,line,ws_too=False):
"""Strips comments from a line (and opptionally leading/trailing whitespace)."""
bam = -1
ix = 0
while True:
bam = line[ix:].find('#',bam + 1)
if bam == -1:
break
bam = ix + bam
if bam == 0:
return ''
if line[ bam - 1 ] != '\\':
line = line[ 0:bam ]
break
else: #if line[ bam - 1 ] == '\\': # ignore '#' and keep looking
ix = bam + 1
#line = line[ 0:bam - 1 ] + line[ bam: ]
if ws_too:
line = line.strip()
return line
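    # Illustrative behaviour of strip_comments (the input strings are hypothetical):
    #   strip_comments("ENCSR000AAA  # pilot experiment\n", ws_too=True) -> "ENCSR000AAA"
    #   a '#' escaped as '\#' is not treated as a comment start, so such text is kept.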
def load_exp_list(self,exp_ids,verbose=False):
'''Returns a sorted list of experiment accessions from command-line args.'''
#verbose=True
id_list = []
file_of_ids = None
# If only one, it could be a file
if len(exp_ids) == 1:
candidate = exp_ids[0]
if candidate.startswith("ENCSR") and len(candidate) == 11:
id_list.append(candidate)
return id_list
else:
file_of_ids = candidate
if file_of_ids != None:
with open(file_of_ids, 'r') as fh:
#line = fh.readline()
for line in fh:
line = self.strip_comments(line,True)
if line == '':
continue
candidate = line.split()[0]
if candidate.startswith("ENCSR") and len(candidate) == 11:
id_list.append(candidate)
elif verbose:
print >> sys.stderr, "Value is not experiment id: '"+candidate+"'"
elif len(exp_ids) > 0:
for candidate in exp_ids:
if candidate.startswith("ENCSR") and len(candidate) == 11:
id_list.append(candidate)
elif verbose:
print >> sys.stderr, "Value is not experiment id: '"+candidate+"'"
if len(id_list) > 0:
sorted_exp_ids = sorted(id_list)
if verbose:
print >> sys.stderr, "Experiment ids: "
print >> sys.stderr, json.dumps(sorted_exp_ids)
print "Requested scrubbing %d experiments" % len(sorted_exp_ids)
return sorted_exp_ids
return []
def file_format(self,file_name):
'''Try to determine file format from file name extension.'''
ext = file_name.split(".")[-1]
if ext == "gz" or ext == "tgz":
ext = file_name.split(".")[-2]
if ext in self.EXTENSION_TO_FORMAT.keys():
ext = self.EXTENSION_TO_FORMAT[ext]
if ext in self.FORMATS_SUPPORTED:
return ext
return None
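    # Illustrative behaviour of file_format (the file names are hypothetical):
    #   file_format("ENCFF000ABC.bed.gz")      -> "bed"  (double extension via EXTENSION_TO_FORMAT)
    #   file_format("rep1_rsem.genes.results") -> "tsv"
    #   file_format("notes.unknown")           -> None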
def find_step_files(self,file_globs,result_folder,rep_tech,verbose=False):
'''Returns tuple list of (type,rep_tech,fid) of ALL files expected for a single step.'''
step_files = []
for token in file_globs.keys():
if type(file_globs[token]) == list:
for file_glob in file_globs[token]:
if token != "QC_only":
if self.file_format(file_glob) == None:
print "Error: file glob %s has unknown file format! Please fix" % file_glob
sys.exit(1)
if verbose:
print >> sys.stderr, "-- Looking for %s" % (result_folder + file_glob)
fids = dx.find_file(result_folder + file_glob,self.proj_id, verbose=verbose, multiple=True, recurse=False)
if fids != None:
if not isinstance(fids, list):
fids = [ fids ]
QC_only = (token == "QC_only") # Use new qc_object posting methods
for fid in fids:
step_files.append( (token,rep_tech,fid,QC_only) )
break # Only looking for the first hit
else:
if token != "QC_only":
if self.file_format(file_globs[token]) == None:
print "Error: file glob %s has unknown file format! Please fix" % file_globs[token]
sys.exit(1)
if verbose:
print >> sys.stderr, "-- Looking for %s" % (result_folder + file_globs[token])
fids = dx.find_file(result_folder + file_globs[token],self.proj_id, verbose=verbose, multiple=True, recurse=False)
if fids != None:
if not isinstance(fids, list):
fids = [ fids ]
#if verbose:
# print >> sys.stderr, "-- Found %d files for %s" % (len(fids),result_folder + file_globs[token])
QC_only = (token == "QC_only") # Use new qc_object posting methods
for fid in fids:
step_files.append( (token,rep_tech,fid,QC_only) )
#else:
# return [] # Only include files from completed steps!
return step_files
def find_expected_files(self,exp_folder,replicates,verbose=False):
'''Returns tuple list of (type,rep_tech,fid) of files expected to be posted to ENCODE.'''
expected = []
# First find replicate step files
added_fastqs = False
for step in self.pipeline["step-order"]:
if step not in self.pipeline["replicate"]:
continue
if self.fastqs_too and not added_fastqs and 'fastqs' not in self.pipeline["replicate"][step]:
self.pipeline["replicate"][step]['fastqs'] = "*.fastq.gz"
added_fastqs = True # Just adding this to the first step is all that is needed.
for rep_tech in replicates:
step_files = self.find_step_files(self.pipeline["replicate"][step], \
exp_folder + rep_tech + '/',rep_tech,verbose)
if verbose:
print >> sys.stderr, "-- Found %d files for %s" % (len(step_files),step)
if len(step_files) > 0:
expected.extend(step_files) # keep them in order!
# Now add combined step files
for step in self.pipeline["step-order"]:
if step not in self.pipeline["combined"]:
continue
step_files = self.find_step_files(self.pipeline["combined"][step], \
exp_folder,"combined",verbose)
if len(step_files) > 0:
expected.extend(step_files) # keep them in order!
if verbose:
print >> sys.stderr, "Expected files:"
print >> sys.stderr, json.dumps(expected,indent=4)
return expected
def input_exception(self,inp_fid):
        '''Returns True if this is one of a limited number of input files we do not track in encodeD.'''
# TODO: move specifics to json at top of file.
# Unfortunate special case: the map_report is essentially a QC_only file but is an input to a step in order to
# combine multiple map_reports into a single qc_metric.
try:
if self.exp_type != "dna-me" or not dx.file_path_from_fid(inp_fid).endswith("_map_report.txt"):
#print "** Ignoring file: " + dx.file_path_from_fid(inp_fid)
return False
except:
pass
return True
def find_removable_files(self,files_expected,test=True,verbose=False):
'''Returns the tuple list of files that NEED to be posted to ENCODE.'''
removable = []
not_posted = 0
acc_key = dx.property_accesion_key(self.server_key) # 'accession'
for (out_type, rep_tech, fid, QC_only) in files_expected:
if not QC_only:
acc = dx.file_get_property(acc_key,fid)
if acc != None or self.force:
removable.append( (out_type,rep_tech,fid, False) )
elif self.input_exception(fid):
removable.append( (out_type,rep_tech,fid, False) )
else:
# TODO: back up plan, look on encodeD?
not_posted += 1
print >> sys.stderr, "* WARNING: file '" + dx.file_path_from_fid(fid) + \
"' has not been posted, and will not be scrubbed."
#else: # TODO: How to handle qc_only files (other than just removing them)?
if not_posted > 0 and not self.force: # If even one file is not posted, then none are removable
return []
        # if all expected non-QC files are removable, then go ahead and remove the qc ones as well
for (out_type, rep_tech, fid, QC_only) in files_expected:
if QC_only:
removable.append( (out_type,rep_tech,fid, True) )
if verbose:
print >> sys.stderr, "Removable files:"
print >> sys.stderr, json.dumps(removable,indent=4)
return removable
def run(self):
'''Runs scrub from start to finish using command line arguments.'''
args = self.get_args()
self.test = args.test
self.genome = args.genome
self.force = args.force
self.remove_all = args.remove_all
self.fastqs_too = args.fastqs_too
self.server_key = args.server
encd.set_server_key(self.server_key) # TODO: change to self.encd = Encd(self.server_key)
self.server = encd.get_server()
if self.server_key == "www":
self.acc_prefix = "ENCFF"
self.proj_name = dx.env_get_current_project()
if self.proj_name == None or args.project != None:
self.proj_name = args.project
if self.proj_name == None:
print "Please enter a '--project' to run in."
sys.exit(1)
self.project = dx.get_project(self.proj_name)
self.proj_id = self.project.get_id()
print "== Running in project [%s] and expect files already posted to the [%s] server ==" % \
(self.proj_name,self.server_key)
self.exp_ids = self.load_exp_list(args.experiments,verbose=args.verbose)
if len(self.exp_ids) == 0:
            print >> sys.stderr, "No experiment ids requested."
            self.get_args(parse=False).print_help()
sys.exit(1)
exp_count = 0
exp_removed = 0
exp_kept = 0
deprecates_removed = 0
total_removed = 0
for exp_id in self.exp_ids:
dx.clear_cache()
sys.stdout.flush() # Slow running job should flush to piped log
self.exp_id = exp_id
# 1) Lookup experiment type from encoded, based on accession
print "Working on %s..." % self.exp_id
self.exp = encd.get_exp(self.exp_id,must_find=True)
if self.exp == None or self.exp["status"] == "error":
print "Unable to locate experiment %s in encoded (%s)" % (self.exp_id, self.server_key)
continue
self.exp_type = encd.get_exp_type(self.exp_id,self.exp,self.EXPERIMENT_TYPES_SUPPORTED)
if self.exp_type == None:
continue
# 2) Locate the experiment accession named folder
# NOTE: genome and annotation are not known for this exp yet, so the umbrella folder is just based on exp_type
self.umbrella_folder = dx.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,self.exp_type,"posted",self.genome)
if args.test:
print "- Umbrella folder: " + self.umbrella_folder
self.exp_folder = dx.find_exp_folder(self.project,exp_id,self.umbrella_folder,warn=True)
if self.exp_folder == None:
continue
exp_count += 1
print "- Examining %s:%s for '%s' results..." % \
(self.proj_name, self.exp_folder, self.exp_type)
# Could be quick... remove everything!
if self.remove_all and self.force:
exp_removed += 1
if self.test:
print "* Would remove %s:%s and all results within..." % (self.proj_name, self.exp_folder)
else:
print "* Removing %s:%s and all results within..." % (self.proj_name, self.exp_folder)
dxpy.api.project_remove_folder(self.proj_id,{'folder':self.exp_folder,'recurse':True})
continue
# Remove any 'deprecated' subfolder
deprecated_folder = self.exp_folder + "deprecated/"
if dx.project_has_folder(self.project, deprecated_folder):
deprecates_removed += 1
if self.test:
print "* Would remove %s:%s and all results within..." % (self.proj_name, deprecated_folder)
else:
print "* Removing %s:%s and all results within..." % (self.proj_name, deprecated_folder)
dxpy.api.project_remove_folder(self.proj_id,{'folder':deprecated_folder,'recurse':True})
# 3) Given the experiment type, determine the expected results
self.pipeline = self.pipeline_specification(args,self.exp_type,self.exp_folder)
self.replicates = dx.find_replicate_folders(self.project,self.exp_folder, verbose=args.verbose)
# 4) Given expected results locate any files (by glob) that should have been posted for
# a) each single replicate (in replicate sub-folders named as reN_N/
# b) combined replicates in the experiment folder itself
files_expected = self.find_expected_files(self.exp_folder, self.replicates, verbose=args.verbose)
print "- Found %d files that are available to remove." % len(files_expected)
if len(files_expected) == 0:
continue
# 5) For each file that is available to be removed, determine if the file has been posted first.
files_to_remove = self.find_removable_files(files_expected, test=self.test, verbose=args.verbose)
print "- Found %d files that may be removed" % len(files_to_remove)
if len(files_to_remove) == 0:
print "- KEEPING: If even one file has not been posted, no files may be removed without force."
exp_kept += 1
continue
# 6) For each file that needs to be removed:
files_removed = 0
for (out_type,rep_tech,fid,QC_only) in files_to_remove:
sys.stdout.flush() # Slow running job should flush to piped log
                if args.files != 0 and files_removed >= args.files:  # Short circuit for test
                    print "- Just trying %d file(s) by request" % args.files
                    break
try:
# prove it exists before continuing.
file_name = dx.file_path_from_fid(fid)
except:
continue
if args.start_at != None:
if not file_name.endswith(args.start_at):
continue
else:
print "- Starting at %s" % (file_name)
args.start_at = None
if self.test:
print " * Would remove file %s..." % file_name
else:
print " * Removing file %s..." % file_name
dxpy.api.project_remove_objects(self.proj_id,{'objects':[fid]})
files_removed += 1
if not args.test:
print "- For %s processed %d file(s), removed %d files" % (self.exp_id, len(files_expected), files_removed)
else:
print "- For %s processed %d file(s), would remove %d files" % (self.exp_id, len(files_expected), files_removed)
total_removed += files_removed
if not args.test:
print "Processed %d experiment(s), erased %d, kept %d, removed %d deprecate folder(s) and %d file(s)" % \
(exp_count, exp_removed, exp_kept, deprecates_removed, total_removed)
else:
print "Processed %d experiment(s), would erase %d, would keep %d, would remove %d deprecate folder(s) and %d file(s)" % \
(exp_count, exp_removed, exp_kept, deprecates_removed, total_removed)
print "(finished)"
if __name__ == '__main__':
'''Run from the command line.'''
scrub = Scrub()
scrub.run()
| mit | -6,581,317,744,336,403,000 | 51.168182 | 177 | 0.500247 | false |
ipfire/collecty | src/collecty/plugins/processor.py | 1 | 7032 | #!/usr/bin/python3
###############################################################################
# #
# collecty - A system statistics collection daemon for IPFire #
# Copyright (C) 2012 IPFire development team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import multiprocessing
from . import base
from ..colours import *
from ..constants import *
from ..i18n import _
class GraphTemplateProcessor(base.GraphTemplate):
name = "processor"
@property
def rrd_graph(self):
return [
# Add all used CPU cycles
"CDEF:usage=user,nice,+,sys,+,wait,+,irq,+,sirq,+,steal,+,guest,+,guest_nice,+",
# Add idle to get the total number of cycles
"CDEF:total=usage,idle,+",
# Headline
"COMMENT:%s" % EMPTY_LABEL,
"COMMENT:%s" % (COLUMN % _("Current")),
"COMMENT:%s" % (COLUMN % _("Average")),
"COMMENT:%s" % (COLUMN % _("Minimum")),
"COMMENT:%s\\j" % (COLUMN % _("Maximum")),
"CDEF:usage_p=100,usage,*,total,/",
"COMMENT: %s" % (LABEL % _("Total")),
"GPRINT:usage_p_cur:%s" % PERCENTAGE,
"GPRINT:usage_p_avg:%s" % PERCENTAGE,
"GPRINT:usage_p_min:%s" % PERCENTAGE,
"GPRINT:usage_p_max:%s\\j" % PERCENTAGE,
EMPTY_LINE,
"CDEF:user_p=100,user,*,total,/",
"AREA:user_p%s:%s" % (
transparency(CPU_USER, AREA_OPACITY),
LABEL % _("User"),
),
"GPRINT:user_p_cur:%s" % PERCENTAGE,
"GPRINT:user_p_avg:%s" % PERCENTAGE,
"GPRINT:user_p_min:%s" % PERCENTAGE,
"GPRINT:user_p_max:%s\\j" % PERCENTAGE,
"CDEF:nice_p=100,nice,*,total,/",
"AREA:nice_p%s:%s:STACK" % (
transparency(CPU_NICE, AREA_OPACITY),
LABEL % _("Nice"),
),
"GPRINT:nice_p_cur:%s" % PERCENTAGE,
"GPRINT:nice_p_avg:%s" % PERCENTAGE,
"GPRINT:nice_p_min:%s" % PERCENTAGE,
"GPRINT:nice_p_max:%s\\j" % PERCENTAGE,
"CDEF:sys_p=100,sys,*,total,/",
"AREA:sys_p%s:%s:STACK" % (
transparency(CPU_SYS, AREA_OPACITY),
LABEL % _("System"),
),
"GPRINT:sys_p_cur:%s" % PERCENTAGE,
"GPRINT:sys_p_avg:%s" % PERCENTAGE,
"GPRINT:sys_p_min:%s" % PERCENTAGE,
"GPRINT:sys_p_max:%s\\j" % PERCENTAGE,
"CDEF:wait_p=100,wait,*,total,/",
"AREA:wait_p%s:%s:STACK" % (
transparency(CPU_WAIT, AREA_OPACITY),
LABEL % _("Wait"),
),
"GPRINT:wait_p_cur:%s" % PERCENTAGE,
"GPRINT:wait_p_avg:%s" % PERCENTAGE,
"GPRINT:wait_p_min:%s" % PERCENTAGE,
"GPRINT:wait_p_max:%s\\j" % PERCENTAGE,
"CDEF:irq_p=100,irq,*,total,/",
"AREA:irq_p%s:%s:STACK" % (
transparency(CPU_IRQ, AREA_OPACITY),
LABEL % _("Interrupt"),
),
"GPRINT:irq_p_cur:%s" % PERCENTAGE,
"GPRINT:irq_p_avg:%s" % PERCENTAGE,
"GPRINT:irq_p_min:%s" % PERCENTAGE,
"GPRINT:irq_p_max:%s\\j" % PERCENTAGE,
"CDEF:sirq_p=100,sirq,*,total,/",
"AREA:sirq_p%s:%s:STACK" % (
transparency(CPU_SIRQ, AREA_OPACITY),
LABEL % _("Soft Interrupt"),
),
"GPRINT:sirq_p_cur:%s" % PERCENTAGE,
"GPRINT:sirq_p_avg:%s" % PERCENTAGE,
"GPRINT:sirq_p_min:%s" % PERCENTAGE,
"GPRINT:sirq_p_max:%s\\j" % PERCENTAGE,
"CDEF:steal_p=100,steal,*,total,/",
"AREA:steal_p%s:%s:STACK" % (
transparency(CPU_STEAL, AREA_OPACITY),
LABEL % _("Steal"),
),
"GPRINT:steal_p_cur:%s" % PERCENTAGE,
"GPRINT:steal_p_avg:%s" % PERCENTAGE,
"GPRINT:steal_p_min:%s" % PERCENTAGE,
"GPRINT:steal_p_max:%s\\j" % PERCENTAGE,
"CDEF:guest_p=100,guest,*,total,/",
"AREA:guest_p%s:%s:STACK" % (
transparency(CPU_GUEST, AREA_OPACITY),
LABEL % _("Guest"),
),
"GPRINT:guest_p_cur:%s" % PERCENTAGE,
"GPRINT:guest_p_avg:%s" % PERCENTAGE,
"GPRINT:guest_p_min:%s" % PERCENTAGE,
"GPRINT:guest_p_max:%s\\j" % PERCENTAGE,
"CDEF:guest_nice_p=100,guest_nice,*,total,/",
"AREA:guest_nice_p%s:%s:STACK" % (
transparency(CPU_GUEST_NICE, AREA_OPACITY),
LABEL % _("Guest Nice"),
),
"GPRINT:guest_nice_p_cur:%s" % PERCENTAGE,
"GPRINT:guest_nice_p_avg:%s" % PERCENTAGE,
"GPRINT:guest_nice_p_min:%s" % PERCENTAGE,
"GPRINT:guest_nice_p_max:%s\\j" % PERCENTAGE,
"CDEF:idle_p=100,idle,*,total,/",
"AREA:idle_p%s::STACK" % CPU_IDLE,
# Draw contour lines
"LINE:user_p%s" % CPU_USER,
"LINE:nice_p%s::STACK" % CPU_NICE,
"LINE:sys_p%s::STACK" % CPU_SYS,
"LINE:wait_p%s::STACK" % CPU_WAIT,
"LINE:irq_p%s::STACK" % CPU_IRQ,
"LINE:sirq_p%s::STACK" % CPU_SIRQ,
"LINE:steal_p%s::STACK" % CPU_STEAL,
"LINE:guest_p%s::STACK" % CPU_GUEST,
"LINE:guest_nice_p%s::STACK" % CPU_GUEST_NICE,
]
upper_limit = 100
lower_limit = 0
@property
def graph_title(self):
return _("Processor Usage")
@property
def graph_vertical_label(self):
return _("Percent")
class ProcessorObject(base.Object):
rrd_schema = [
"DS:user:DERIVE:0:U",
"DS:nice:DERIVE:0:U",
"DS:sys:DERIVE:0:U",
"DS:idle:DERIVE:0:U",
"DS:wait:DERIVE:0:U",
"DS:irq:DERIVE:0:U",
"DS:sirq:DERIVE:0:U",
"DS:steal:DERIVE:0:U",
"DS:guest:DERIVE:0:U",
"DS:guest_nice:DERIVE:0:U",
]
def init(self, cpu_id=None):
self.cpu_id = cpu_id
@property
def id(self):
if self.cpu_id is not None:
return "%s" % self.cpu_id
return "default"
def collect(self):
"""
Reads the CPU usage.
"""
stat = self.read_proc_stat()
if self.cpu_id is None:
values = stat.get("cpu")
else:
values = stat.get("cpu%s" % self.cpu_id)
# Convert values into a list
values = values.split()
if not len(values) == len(self.rrd_schema):
raise ValueError("Received unexpected output from /proc/stat: %s" % values)
return values
class ProcessorPlugin(base.Plugin):
name = "processor"
description = "Processor Usage Plugin"
templates = [GraphTemplateProcessor]
@property
def objects(self):
yield ProcessorObject(self)
num = multiprocessing.cpu_count()
for i in range(num):
yield ProcessorObject(self, cpu_id=i)
| gpl-3.0 | -3,983,568,614,264,902,000 | 29.441558 | 83 | 0.558447 | false |
SWRG/ESWC2015-paper-evaluation | tests/yen_algorithms_test.py | 1 | 1681 | import sys,os
sys.path.insert(0,os.path.abspath(__file__+"/../.."))
import yenalgo2,corefunctions
import networkx as nx
import unittest
class KnownValues(unittest.TestCase):
g = nx.Graph()
g.add_edge(1,2,{'weight':1})
g.add_edge(1,3,{'weight':2})
g.add_edge(1,4,{'weight':3})
g.add_edge(2,5,{'weight':2})
g.add_edge(2,6,{'weight':1})
g.add_edge(3,7,{'weight':1})
g.add_edge(3,8,{'weight':3})
g.add_edge(3,9,{'weight':4})
g.add_edge(3,10,{'weight':1})
g.add_edge(4,10,{'weight':2})
g.add_edge(4,11,{'weight':2})
g.add_edge(5,12,{'weight':1})
g.add_edge(6,13,{'weight':2})
g.add_edge(10,14,{'weight':2})
g.add_edge(14,15,{'weight':2})
(s,t)=(3,15)
knownValuesYen = (
((1,2),[(1.0, [1, 2])]),
((3,15),[(5.0, [3, 10, 14, 15]), (11.0, [3, 1, 4, 10, 14, 15])]),
((1,15),[(7.0, [1, 3, 10, 14, 15]), (9.0, [1, 4, 10, 14, 15])]),
((4,15),[(6.0, [4, 10, 14, 15]), (10.0, [4, 1, 3, 10, 14, 15])])
)
def test_YenKSP_generator_KnownValues(self):
"""YenKSP_generator should give known result with known input"""
for ((source,target), expected_result) in self.knownValuesYen:
result = [p for p in corefunctions.YenKSP_generator(self.g,source,target)]
self.assertEqual(expected_result, result)
def test_yenalgo2_KnownValues(self):
"""yenalgo2 should give known result with known input"""
for ((source,target), expected_result) in self.knownValuesYen:
result = [p for p in yenalgo2.k_shortest_paths(self.g,source,target,4)]
self.assertEqual(expected_result, result)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -3,794,693,297,594,283,000 | 35.543478 | 86 | 0.572278 | false |
cloudify-cosmo/cloudify-nsx-plugin | cloudify_nsx/network/dhcp_bind.py | 1 | 5021 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
import cloudify_nsx.library.nsx_common as common
from cloudify import exceptions as cfy_exc
import cloudify_nsx.library.nsx_esg_dlr as nsx_dhcp
@operation
def create(**kwargs):
validation_rules = {
"esg_id": {
"required": True
},
"vm_id": {
"set_none": True
},
"vnic_id": {
"set_none": True,
"type": "string"
},
"mac": {
"set_none": True
},
"hostname": {
"required": True
},
"ip": {
"required": True
},
"default_gateway": {
"set_none": True
},
"subnet_mask": {
"set_none": True
},
"domain_name": {
"set_none": True
},
"dns_server_1": {
"set_none": True
},
"dns_server_2": {
"set_none": True
},
"lease_time": {
"set_none": True
},
"auto_dns": {
"set_none": True
}
}
use_existing, bind_dict = common.get_properties_and_validate(
'bind', kwargs, validation_rules
)
if use_existing:
ctx.logger.info("Used pre existed!")
return
resource_id = ctx.instance.runtime_properties.get('resource_id')
if resource_id:
ctx.logger.info("Reused %s" % resource_id)
return
# credentials
client_session = common.nsx_login(kwargs)
if bind_dict.get('mac'): # if NONE skip this part
resource_id = nsx_dhcp.add_mac_binding(client_session,
bind_dict['esg_id'],
bind_dict['mac'],
bind_dict['hostname'],
bind_dict['ip'],
bind_dict['default_gateway'],
bind_dict['subnet_mask'],
bind_dict['domain_name'],
bind_dict['dns_server_1'],
bind_dict['dns_server_2'],
bind_dict['lease_time'],
bind_dict['auto_dns'])
elif bind_dict.get('vnic_id') is not None and bind_dict.get('vm_id'):
resource_id = nsx_dhcp.add_vm_binding(client_session,
bind_dict['esg_id'],
bind_dict['vm_id'],
bind_dict['vnic_id'],
bind_dict['hostname'],
bind_dict['ip'],
bind_dict['default_gateway'],
bind_dict['subnet_mask'],
bind_dict['domain_name'],
bind_dict['dns_server_1'],
bind_dict['dns_server_2'],
bind_dict['lease_time'],
bind_dict['auto_dns'])
else:
raise cfy_exc.NonRecoverableError(
"Please fill vm_id/vnic_id or mac"
)
ctx.instance.runtime_properties['resource_id'] = resource_id
ctx.logger.info("Binded %s | %s" % (resource_id, bind_dict))
@operation
def delete(**kwargs):
use_existing, bind_dict = common.get_properties('bind', kwargs)
if use_existing:
common.remove_properties('bind')
ctx.logger.info("Used pre existed!")
return
resource_id = ctx.instance.runtime_properties.get('resource_id')
if not resource_id:
common.remove_properties('bind')
ctx.logger.info("We dont have resource_id")
return
# credentials
client_session = common.nsx_login(kwargs)
common.attempt_with_rerun(
nsx_dhcp.delete_dhcp_binding,
client_session=client_session,
resource_id=resource_id
)
ctx.logger.info("deleted %s" % resource_id)
common.remove_properties('bind')
| apache-2.0 | 6,749,471,616,254,207,000 | 33.390411 | 79 | 0.468632 | false |
cajone/pychess | lib/pychess/widgets/pydock/PyDockTop.py | 1 | 9627 | from __future__ import absolute_import
from __future__ import print_function
import os
from xml.dom import minidom
from collections import defaultdict
from pychess.System.prefix import addDataPrefix
from .PyDockLeaf import PyDockLeaf
from .PyDockComposite import PyDockComposite
from .ArrowButton import ArrowButton
from .HighlightArea import HighlightArea
from .__init__ import TabReceiver
from .__init__ import NORTH, EAST, SOUTH, WEST, CENTER
class PyDockTop(PyDockComposite, TabReceiver):
def __init__(self, id, perspective):
TabReceiver.__init__(self, perspective)
self.id = id
self.perspective = perspective
self.set_no_show_all(True)
self.highlightArea = HighlightArea(self)
self.button_cids = defaultdict(list)
self.buttons = (
ArrowButton(self, addDataPrefix("glade/dock_top.svg"), NORTH),
ArrowButton(self, addDataPrefix("glade/dock_right.svg"), EAST),
ArrowButton(self, addDataPrefix("glade/dock_bottom.svg"), SOUTH),
ArrowButton(self, addDataPrefix("glade/dock_left.svg"), WEST))
for button in self.buttons:
self.button_cids[button] += [
button.connect("dropped", self.__onDrop),
button.connect("hovered", self.__onHover),
button.connect("left", self.__onLeave),
]
def _del(self):
self.highlightArea.disconnect(self.highlightArea.cid)
for button in self.buttons:
for cid in self.button_cids[button]:
button.disconnect(cid)
button.myparent = None
self.button_cids = {}
self.highlightArea.myparent = None
#self.buttons = None
#self.highlightArea = None
TabReceiver._del(self)
PyDockComposite._del(self)
def getPosition(self):
return CENTER
def __repr__(self):
return "top (%s)" % self.id
# ===========================================================================
# Component stuff
# ===========================================================================
def addComponent(self, widget):
self.add(widget)
widget.show()
def changeComponent(self, old, new):
self.removeComponent(old)
self.addComponent(new)
def removeComponent(self, widget):
self.remove(widget)
def getComponents(self):
child = self.get_child()
if isinstance(child, PyDockComposite) or isinstance(child, PyDockLeaf):
return [child]
return []
def dock(self, widget, position, title, id):
if not self.getComponents():
leaf = PyDockLeaf(widget, title, id, self.perspective)
self.addComponent(leaf)
return leaf
else:
return self.get_child().dock(widget, position, title, id)
def clear(self):
self.remove(self.get_child())
# ===========================================================================
# Signals
# ===========================================================================
def showArrows(self):
for button in self.buttons:
button._calcSize()
button.show()
def hideArrows(self):
for button in self.buttons:
button.hide()
self.highlightArea.hide()
def __onDrop(self, arrowButton, sender):
self.highlightArea.hide()
child = sender.get_nth_page(sender.get_current_page())
title, id = sender.get_parent().undock(child)
self.dock(child, arrowButton.myposition, title, id)
def __onHover(self, arrowButton, widget):
self.highlightArea.showAt(arrowButton.myposition)
arrowButton.get_window().raise_()
def __onLeave(self, arrowButton):
self.highlightArea.hide()
# ===========================================================================
# XML
# ===========================================================================
def saveToXML(self, xmlpath):
"""
<docks>
<dock id="x">
<v pos="200">
<leaf current="x" dockable="False">
<panel id="x" />
</leaf>
<h pos="200">
<leaf current="y" dockable="True">
<panel id="y" />
<panel id="z" />
</leaf>
<leaf current="y" dockable="True">
<panel id="y" />
</leaf>
</h>
</v>
</dock>
</docks>
"""
dockElem = None
if os.path.isfile(xmlpath):
doc = minidom.parse(xmlpath)
for elem in doc.getElementsByTagName("dock"):
if elem.getAttribute("id") == self.id:
for node in elem.childNodes:
elem.removeChild(node)
dockElem = elem
break
if not dockElem:
doc = minidom.getDOMImplementation().createDocument(None, "docks",
None)
dockElem = doc.createElement("dock")
dockElem.setAttribute("id", self.id)
doc.documentElement.appendChild(dockElem)
if self.get_child():
self.__addToXML(self.get_child(), dockElem, doc)
f_handle = open(xmlpath, "w")
doc.writexml(f_handle)
f_handle.close()
doc.unlink()
def __addToXML(self, component, parentElement, document):
if isinstance(component, PyDockComposite):
pos = component.paned.get_position()
if component.getPosition() in (NORTH, SOUTH):
childElement = document.createElement("v")
size = float(component.get_allocation().height)
else:
childElement = document.createElement("h")
size = float(component.get_allocation().width)
# if component.getPosition() in (NORTH, SOUTH):
# print "saving v position as %s out of %s (%s)" % (str(pos), str(size), str(pos/max(size,pos)))
childElement.setAttribute("pos", str(pos / max(size, pos)))
self.__addToXML(component.getComponents()[0], childElement,
document)
self.__addToXML(component.getComponents()[1], childElement,
document)
elif isinstance(component, PyDockLeaf):
childElement = document.createElement("leaf")
childElement.setAttribute("current", component.getCurrentPanel())
childElement.setAttribute("dockable", str(component.isDockable()))
for panel, title, id in component.getPanels():
element = document.createElement("panel")
element.setAttribute("id", id)
childElement.appendChild(element)
parentElement.appendChild(childElement)
def loadFromXML(self, xmlpath, idToWidget):
""" idTowidget is a dictionary {id: (widget,title)}
asserts that self.id is in the xmlfile """
doc = minidom.parse(xmlpath)
for elem in doc.getElementsByTagName("dock"):
if elem.getAttribute("id") == self.id:
break
else:
raise AttributeError(
"XML file contains no <dock> elements with id '%s'" % self.id)
child = [n for n in elem.childNodes if isinstance(n, minidom.Element)]
if child:
self.addComponent(self.__createWidgetFromXML(child[0], idToWidget))
def __createWidgetFromXML(self, parentElement, idToWidget):
children = [n
for n in parentElement.childNodes
if isinstance(n, minidom.Element)]
if parentElement.tagName in ("h", "v"):
child1, child2 = children
if parentElement.tagName == "h":
new = PyDockComposite(EAST, self.perspective)
else:
new = PyDockComposite(SOUTH, self.perspective)
new.initChildren(
self.__createWidgetFromXML(child1, idToWidget),
self.__createWidgetFromXML(child2, idToWidget),
preserve_dimensions=True)
def cb(widget, event, pos):
allocation = widget.get_allocation()
if parentElement.tagName == "h":
widget.set_position(int(allocation.width * pos))
else:
# print "loading v position as %s out of %s (%s)" % \
# (int(allocation.height * pos), str(allocation.height), str(pos))
widget.set_position(int(allocation.height * pos))
widget.disconnect(conid)
conid = new.paned.connect("size-allocate", cb, float(parentElement.getAttribute("pos")))
return new
elif parentElement.tagName == "leaf":
id = children[0].getAttribute("id")
title, widget = idToWidget[id]
leaf = PyDockLeaf(widget, title, id, self.perspective)
for panelElement in children[1:]:
id = panelElement.getAttribute("id")
title, widget = idToWidget[id]
leaf.dock(widget, CENTER, title, id)
leaf.setCurrentPanel(parentElement.getAttribute("current"))
if parentElement.getAttribute("dockable").lower() == "false":
leaf.setDockable(False)
return leaf
| gpl-3.0 | 4,931,489,659,849,762,000 | 36.901575 | 112 | 0.52976 | false |
xju2/hzzws | scripts/low_mass.py | 1 | 2906 | #!/usr/bin/env python
import common
import glob
name = "Low"
binning = "60, 110, 140"
branch = "m4l_constrained, "+binning
###in workspace
obs_binning = binning
# key: category name
# value: TCut on mini-tree
categories = {
"ggF_4mu_13TeV" : "(event_type==0)",
"ggF_2mu2e_13TeV" : "(event_type==2)",
"ggF_2e2mu_13TeV" : "(event_type==3)",
"ggF_4e_13TeV" : "(event_type==1)",
}
#categories = {"all" : "(1==1)"}
sig_samples = ["ggH", "VBFH", "ZH", "WH", "ttH"]
bkg_samples = ["qqZZ", "Zjets",
"ggZZ"
]
samples = sig_samples + bkg_samples
samples_para = samples
samples_lowmass_sig125 = {
"ggH":common.minitree_dir+"mc15_13TeV.341505.PowhegPythia8EvtGen_CT10_AZNLOCTEQ6L1_ggH125_ZZ4lep_noTau.root",
"VBFH":common.minitree_dir+"mc15_13TeV.341518.PowhegPythia8EvtGen_CT10_AZNLOCTEQ6L1_VBFH125_ZZ4lep_noTau.root",
"WH":common.minitree_dir+"mc15_13TeV.341964.Pythia8EvtGen_A14NNPDF23LO_WH125_ZZ4l.root",
"ZH":common.minitree_dir+"mc15_13TeV.341947.Pythia8EvtGen_A14NNPDF23LO_ZH125_ZZ4l.root",
"ttH":common.minitree_dir+"mc15_13TeV.342561.aMcAtNloHerwigppEvtGen_UEEE5_CTEQ6L1_CT10ME_ttH125_4l.root",
}
#masses = [124, 125, 126]
masses = [125]
mass_points = len(masses)
def get_mass(im):
return masses[im]
def get_sample_dict(mass):
tmp_res = {}
sample_list = sig_samples
for sample_name in sample_list:
pattern = common.minitree_dir+"*"+sample_name+str(mass)+"_*4l*.root"
file_list = glob.glob(pattern)
#print mass,len(file_list), file_list
if len(file_list) == 1:
tmp_res[sample_name] = file_list[0]
elif len(file_list) == 2:
for ff in file_list:
if "noTau" in ff:
tmp_res[sample_name] = ff
return tmp_res
def get_signal_dict():
tmp_dic = {}
for im in range(mass_points):
mass = get_mass(im)
tmp_dic[str(mass)] = get_sample_dict(mass)
return tmp_dic
samples_sig = get_signal_dict()
samples_bkg = {
#"qqZZ":common.minitree_dir+"mc15_13TeV.342556.PowhegPy8EG_CT10nloME_AZNLOCTEQ6L1_ZZllll_mll4_m4l_100_150.root",
"qqZZ":"/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2015/MiniTrees/Prod_v03/mc_15b/Nominal/mc15_13TeV.342556.PowhegPy8EG_CT10nloME_AZNLOCTEQ6L1_ZZllll_mll4_m4l_100_150.root",
#"Zjets":common.minitree_dir+"combined/mc15_redBkg_filtered.root"
"Zjets":"/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2015/MiniTrees/Prod_v01/mc/Nominal/combined/mc15_redBkg_filtered.root",
"ggZZ":common.minitree_dir+"mc15_gg2ZZ_low.root",
}
def print_samples():
for sample,add in samples_bkg.iteritems():
print sample,add
for sample,add in samples_sig["125"].iteritems():
print sample,add
#print_samples()
samples_sig_scale = 1.0
samples_bkg_scale = 1.0
data = common.minitree_dir+"../../data15_grl_v73.root"
if __name__ == "__main__":
print_samples()
| mit | -8,342,612,487,512,922,000 | 32.790698 | 174 | 0.652787 | false |
Lothiraldan/OneTask | onetask/tests.py | 1 | 4351 | # -*- coding: utf-8 -*-
import os
import json
import tempfile
import unittest
from .collection import TaskCollection
from subprocess import check_output, CalledProcessError
class TaskCollectionTest(unittest.TestCase):
def _create_db(self, **kwargs):
temp = tempfile.NamedTemporaryFile(prefix='onetasktest', suffix='.json',
mode='w+t', delete=False)
temp.write(json.dumps(dict(**kwargs)))
        temp.flush()  # make sure the JSON is on disk before it is read back by name
return temp
def _load(self, **kwargs):
temp = self._create_db(**kwargs)
return TaskCollection.load(temp.name)
def assertCommandOK(self, command):
try:
check_output(command)
except CalledProcessError as err:
            raise AssertionError('Command is not ok: %s' % err)
def assertCommandKO(self, command):
assert isinstance(command, (list, tuple,))
self.assertRaises(CalledProcessError, check_output, command)
def test_load(self):
tasks = self._load(tasks=[{"title": "task1"}, {"title": "task2"}])
self.assertEquals(len(tasks.data['tasks']), 2)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
self.assertEquals(tasks.data['tasks'][1]['title'], 'task2')
def test_add(self):
tasks = self._load(tasks=[])
tasks.add('task1')
self.assertEquals(len(tasks.data['tasks']), 1)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
tasks.add('task2')
self.assertEquals(len(tasks.data['tasks']), 2)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
tasks.add('task3')
self.assertEquals(len(tasks.data['tasks']), 3)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task1')
def test_get(self):
tasks = self._load(tasks=[{"title": "task1", "created": 1000}],
current=None, archive=[])
self.assertEqual(tasks.get(), 'task1')
for x in range(2, 100):
tasks.add('task%d' % x)
self.assertEqual(len(tasks.data['tasks']), x - 1)
self.assertEquals(tasks.get(), 'task1')
tasks.done(closed=3000)
self.assertEqual(len(tasks.data['tasks']), x - 1)
self.assertNotEquals(tasks.get(), 'task1')
self.assertEquals(tasks.data['archive'][0]['title'], 'task1')
self.assertEquals(tasks.data['archive'][0]['duration'], 2000)
def test_done(self):
tasks = self._load(tasks=[], current=None, archive=[])
tasks.add('task1')
self.assertEquals(tasks.get(), 'task1')
self.assertEquals(len(tasks.data['tasks']), 0)
tasks.add('task2')
self.assertEquals(tasks.get(), 'task1')
self.assertEquals(len(tasks.data['tasks']), 1)
self.assertEquals(len(tasks.data['archive']), 0)
tasks.done()
self.assertEquals(len(tasks.data['tasks']), 1)
self.assertEquals(tasks.data['tasks'][0]['title'], 'task2')
self.assertEquals(len(tasks.data['archive']), 1)
self.assertEquals(tasks.data['archive'][0]['title'], 'task1')
tasks.get()
tasks.done()
self.assertEquals(len(tasks.data['tasks']), 0)
self.assertEquals(len(tasks.data['archive']), 2)
self.assertEquals(tasks.data['archive'][0]['title'], 'task1')
self.assertEquals(tasks.data['archive'][1]['title'], 'task2')
def test_skip(self):
tasks = self._load(tasks=[{"title": "task1"},
{"title": "task2"},
{"title": "task3"}],
current=None)
current = tasks.get()
for i in range(4):
tasks.skip()
new = tasks.get()
self.assertNotEquals(current, new)
current = new
def test_cli(self):
tmp_path = self._create_db(current=None, tasks=[], archive=[]).name
os.environ['ONETASK_DB'] = tmp_path
executable = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'bin', 'onetask'))
self.assertCommandOK([executable])
self.assertCommandOK([executable, 'add', 'plop'])
self.assertEquals(check_output([executable, 'get']), b'plop\n')
self.assertCommandOK([executable, 'done'])
self.assertCommandKO([executable, 'get'])
if __name__ == '__main__':
unittest.main()
| mit | -1,090,320,991,968,640,100 | 38.198198 | 80 | 0.580326 | false |
conejoninja/xbmc-seriesly | library_service.py | 1 | 4954 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
import sys
import xbmc,time
from core import scrapertools
from core import config
from core import logger
from core.item import Item
from servers import servertools
logger.info("[library_service.py] Actualizando series...")
from platformcode.xbmc import library
from platformcode.xbmc import launcher
import xbmcgui
#Delete the folder before updating
directorio = os.path.join(config.get_library_path(),"SERIES")
logger.info ("directorio="+directorio)
import shutil
#if os.path.exists(directorio):
# shutil.rmtree(directorio)
if not os.path.exists(directorio):
os.mkdir(directorio)
nombre_fichero_config_canal = os.path.join( config.get_data_path() , "series.xml" )
try:
if config.get_setting("updatelibrary")=="true":
config_canal = open( nombre_fichero_config_canal , "r" )
for serie in config_canal.readlines():
logger.info("[library_service.py] serie="+serie)
serie = serie.split(",")
ruta = os.path.join( config.get_library_path() , "SERIES" , serie[0] )
logger.info("[library_service.py] ruta =#"+ruta+"#")
if os.path.exists( ruta ):
logger.info("[library_service.py] Actualizando "+serie[0])
item = Item(url=serie[1], show=serie[0])
try:
itemlist = []
if serie[2].strip()=='veranime':
from seriesly.channels import veranime
itemlist = veranime.episodios(item)
if serie[2].strip()=='tumejortv':
from seriesly.channels import tumejortv
itemlist = tumejortv.findepisodios(item)
if serie[2].strip()=='shurweb':
from seriesly.channels import shurweb
itemlist = shurweb.episodios(item)
if serie[2].strip()=='seriespepito':
from seriesly.channels import seriespepito
itemlist = seriespepito.episodios(item)
if serie[2].strip()=='seriesyonkis':
from seriesly.channels import seriesyonkis
itemlist = seriesyonkis.episodios(item)
if serie[2].strip()=='seriesly':
from seriesly.channels import seriesly
itemlist = seriesly.episodios(item)
if serie[2].strip()=='cuevana':
from seriesly.channels import cuevana
itemlist = cuevana.episodios(item)
if serie[2].strip()=='animeflv':
from seriesly.channels import animeflv
itemlist = animeflv.episodios(item)
if serie[2].strip()=='animeid':
from seriesly.channels import animeid
itemlist = animeid.episodios(item)
if serie[2].strip()=='moviezet':
from seriesly.channels import moviezet
itemlist = moviezet.serie(item)
except:
import traceback
from pprint import pprint
exc_type, exc_value, exc_tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
for line in lines:
line_splits = line.split("\n")
for line_split in line_splits:
logger.error(line_split)
itemlist = []
else:
logger.info("[library_service.py] No actualiza "+serie[0]+" (no existe el directorio)")
itemlist=[]
for item in itemlist:
#logger.info("item="+item.tostring())
try:
item.show=serie[0].strip()
library.savelibrary( titulo=item.title , url=item.url , thumbnail=item.thumbnail , server=item.server , plot=item.plot , canal=item.channel , category="Series" , Serie=item.show , verbose=False, accion="play_from_library", pedirnombre=False, subtitle=item.subtitle )
except:
logger.info("[library_service.py] Capitulo no valido")
import xbmc
xbmc.executebuiltin('UpdateLibrary(video)')
else:
logger.info("No actualiza la biblioteca, está desactivado en la configuración de seriesly")
except:
logger.info("[library_service.py] No hay series para actualizar")
| gpl-3.0 | -8,410,030,053,769,435,000 | 43.018182 | 286 | 0.528675 | false |
looooo/pivy | scons/scons-local-1.2.0.d20090919/SCons/Tool/ifort.py | 1 | 3365 | """SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py 4369 2009/09/19 15:58:29 scons"
import string
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifort to an Environment."""
# ifort supports Fortran 90 and Fortran 95
# Additionally, ifort recognizes more file extensions.
fscan = FortranScan("FORTRANPATH")
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i']
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90']
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
fc = 'ifort'
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
env['%s' % dialect] = fc
env['SH%s' % dialect] = '$%s' % dialect
if env['PLATFORM'] == 'posix':
env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)
if env['PLATFORM'] == 'win32':
# On Windows, the ifort compiler specifies the object on the
# command line with -object:, not -o. Massage the necessary
# command-line construction variables.
for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
env[var] = string.replace(env[var], '-o $TARGET', '-object:$TARGET')
env['FORTRANMODDIRPREFIX'] = "/module:"
else:
env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
return env.Detect('ifort')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| isc | -7,407,020,927,223,222,000 | 36.388889 | 89 | 0.685587 | false |
bitglue/shinysdr | shinysdr/plugins/vor/__init__.py | 1 | 9491 | # Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid <[email protected]>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
# TODO: fully clean up this GRC-generated file
from __future__ import absolute_import, division
import math
import os.path
from twisted.web import static
from zope.interface import implementer
from gnuradio import analog
from gnuradio import blocks
from gnuradio import fft
from gnuradio import gr
from gnuradio import filter as grfilter # don't shadow builtin
from gnuradio.filter import firdes
from shinysdr.filters import make_resampler
from shinysdr.interfaces import ClientResourceDef, ModeDef, IDemodulator, IModulator
from shinysdr.plugins.basic_demod import SimpleAudioDemodulator, design_lofi_audio_filter
from shinysdr.signals import SignalType
from shinysdr.types import QuantityT, RangeT
from shinysdr import units
from shinysdr.values import ExportedState, exported_value, setter
audio_modulation_index = 0.07
fm_subcarrier = 9960
fm_deviation = 480
@implementer(IDemodulator)
class VOR(SimpleAudioDemodulator):
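    """VOR demodulator: outputs the AM voice/ident audio and derives the bearing
    from the phase difference between the 30 Hz tone on the AM component and the
    30 Hz tone FM-modulated onto the 9960 Hz subcarrier."""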
def __init__(self, mode='VOR', zero_point=59, **kwargs):
self.channel_rate = channel_rate = 40000
internal_audio_rate = 20000 # TODO over spec'd
self.zero_point = zero_point
transition = 5000
SimpleAudioDemodulator.__init__(self,
mode=mode,
audio_rate=internal_audio_rate,
demod_rate=channel_rate,
band_filter=fm_subcarrier * 1.25 + fm_deviation + transition / 2,
band_filter_transition=transition,
**kwargs)
self.dir_rate = dir_rate = 10
if internal_audio_rate % dir_rate != 0:
raise ValueError('Audio rate %s is not a multiple of direction-finding rate %s' % (internal_audio_rate, dir_rate))
self.dir_scale = dir_scale = internal_audio_rate // dir_rate
self.audio_scale = audio_scale = channel_rate // internal_audio_rate
self.zeroer = blocks.add_const_vff((zero_point * (math.pi / 180), ))
self.dir_vector_filter = grfilter.fir_filter_ccf(1, firdes.low_pass(
1, dir_rate, 1, 2, firdes.WIN_HAMMING, 6.76))
self.am_channel_filter_block = grfilter.fir_filter_ccf(1, firdes.low_pass(
1, channel_rate, 5000, 5000, firdes.WIN_HAMMING, 6.76))
self.goertzel_fm = fft.goertzel_fc(channel_rate, dir_scale * audio_scale, 30)
self.goertzel_am = fft.goertzel_fc(internal_audio_rate, dir_scale, 30)
self.fm_channel_filter_block = grfilter.freq_xlating_fir_filter_ccc(1, (firdes.low_pass(1.0, channel_rate, fm_subcarrier / 2, fm_subcarrier / 2, firdes.WIN_HAMMING)), fm_subcarrier, channel_rate)
self.multiply_conjugate_block = blocks.multiply_conjugate_cc(1)
self.complex_to_arg_block = blocks.complex_to_arg(1)
self.am_agc_block = analog.feedforward_agc_cc(1024, 1.0)
self.am_demod_block = analog.am_demod_cf(
channel_rate=channel_rate,
audio_decim=audio_scale,
audio_pass=5000,
audio_stop=5500,
)
self.fm_demod_block = analog.quadrature_demod_cf(1)
self.phase_agc_fm = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.phase_agc_am = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.probe = blocks.probe_signal_f()
self.audio_filter_block = grfilter.fir_filter_fff(1, design_lofi_audio_filter(internal_audio_rate, False))
##################################################
# Connections
##################################################
# Input
self.connect(
self,
self.band_filter_block)
# AM chain
self.connect(
self.band_filter_block,
self.am_channel_filter_block,
self.am_agc_block,
self.am_demod_block)
# AM audio
self.connect(
self.am_demod_block,
blocks.multiply_const_ff(1.0 / audio_modulation_index * 0.5),
self.audio_filter_block)
self.connect_audio_output(self.audio_filter_block)
# AM phase
self.connect(
self.am_demod_block,
self.goertzel_am,
self.phase_agc_am,
(self.multiply_conjugate_block, 0))
# FM phase
self.connect(
self.band_filter_block,
self.fm_channel_filter_block,
self.fm_demod_block,
self.goertzel_fm,
self.phase_agc_fm,
(self.multiply_conjugate_block, 1))
# Phase comparison and output
self.connect(
self.multiply_conjugate_block,
self.dir_vector_filter,
self.complex_to_arg_block,
blocks.multiply_const_ff(-1), # opposite angle conventions
self.zeroer,
self.probe)
@exported_value(type=QuantityT(units.degree), changes='this_setter', label='Zero')
def get_zero_point(self):
return self.zero_point
@setter
def set_zero_point(self, zero_point):
self.zero_point = zero_point
self.zeroer.set_k((self.zero_point * (math.pi / 180), ))
# TODO: Have a dedicated angle type which can be specified as referenced to true/magnetic north
@exported_value(type=QuantityT(units.degree), changes='continuous', label='Bearing')
def get_angle(self):
return self.probe.level()
@implementer(IModulator)
class VORModulator(gr.hier_block2, ExportedState):
__vor_sig_freq = 30
__audio_rate = 10000
__rf_rate = 30000 # needs to be above fm_subcarrier * 2
def __init__(self, context, mode, angle=0.0):
gr.hier_block2.__init__(
self, 'SimulatedDevice VOR modulator',
gr.io_signature(1, 1, gr.sizeof_float * 1),
gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
)
self.__angle = 0.0 # dummy statically visible value will be overwritten
# TODO: My signal level parameters are probably wrong because this signal doesn't look like a real VOR signal
vor_30 = analog.sig_source_f(self.__audio_rate, analog.GR_COS_WAVE, self.__vor_sig_freq, 1, 0)
vor_add = blocks.add_cc(1)
vor_audio = blocks.add_ff(1)
# Audio/AM signal
self.connect(
vor_30,
blocks.multiply_const_ff(0.3), # M_n
(vor_audio, 0))
self.connect(
self,
blocks.multiply_const_ff(audio_modulation_index), # M_i
(vor_audio, 1))
# Carrier component
self.connect(
analog.sig_source_c(0, analog.GR_CONST_WAVE, 0, 0, 1),
(vor_add, 0))
# AM component
self.__delay = blocks.delay(gr.sizeof_gr_complex, 0) # configured by set_angle
self.connect(
vor_audio,
make_resampler(self.__audio_rate, self.__rf_rate), # TODO make a complex version and do this last
blocks.float_to_complex(1),
self.__delay,
(vor_add, 1))
# FM component
vor_fm_mult = blocks.multiply_cc(1)
self.connect( # carrier generation
analog.sig_source_f(self.__rf_rate, analog.GR_COS_WAVE, fm_subcarrier, 1, 0),
blocks.float_to_complex(1),
(vor_fm_mult, 1))
self.connect( # modulation
vor_30,
make_resampler(self.__audio_rate, self.__rf_rate),
analog.frequency_modulator_fc(2 * math.pi * fm_deviation / self.__rf_rate),
blocks.multiply_const_cc(0.3), # M_d
vor_fm_mult,
(vor_add, 2))
self.connect(
vor_add,
self)
# calculate and initialize delay
self.set_angle(angle)
@exported_value(type=RangeT([(0, 2 * math.pi)], unit=units.degree, strict=False), changes='this_setter', label='Bearing')
def get_angle(self):
return self.__angle
@setter
def set_angle(self, value):
value = float(value)
compensation = math.pi / 180 * -6.5 # empirical, calibrated against VOR receiver (and therefore probably wrong)
value = value + compensation
value = value % (2 * math.pi)
phase_shift = int(self.__rf_rate / self.__vor_sig_freq * (value / (2 * math.pi)))
self.__delay.set_dly(phase_shift)
self.__angle = value
def get_input_type(self):
return SignalType(kind='MONO', sample_rate=self.__audio_rate)
def get_output_type(self):
return SignalType(kind='IQ', sample_rate=self.__rf_rate)
# Twisted plugin exports
pluginMode = ModeDef(mode='VOR',
info='VOR',
demod_class=VOR,
mod_class=VORModulator)
pluginClient = ClientResourceDef(
key=__name__,
resource=static.File(os.path.join(os.path.split(__file__)[0], 'client')),
load_js_path='vor.js')
| gpl-3.0 | 5,031,653,032,306,906,000 | 37.738776 | 203 | 0.611632 | false |
zhexiao/ezhost | docs_en/conf.py | 1 | 9557 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ezhost documentation build configuration file, created by
# sphinx-quickstart on Wed May 25 11:10:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ezhost'
copyright = '2016, Zhe Xiao'
author = 'Zhe Xiao'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.5'
# The full version, including alpha/beta/rc tags.
release = '1.2.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'ezhost v1.2.5'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ezhostdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ezhost.tex', 'ezhost Documentation',
'Zhe Xiao', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ezhost', 'ezhost Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ezhost', 'ezhost Documentation',
author, 'ezhost', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
github_url = 'https://github.com/zhexiao/ezhost'
| mit | 8,173,505,382,048,362,000 | 31.729452 | 80 | 0.705556 | false |
ctgriffiths/twister | installer/installer_client.py | 1 | 8553 |
# version: 3.005
# File: installer.py ; This file is part of Twister.
# Copyright (C) 2012-2013 , Luxoft
# Authors:
# Andrei Costachi <[email protected]>
# Cristi Constantin <[email protected]>
# Mihai Dobre <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Twister Installer
=================
Requires Python 2.7 and a Linux machine. The installer doesn't run on Windows!
When installing Twister for the first time, you must run install_dependencies first.
Twister Client will be installed in the home of your user, in the folder `twister`.
'''
import os, sys
import binascii
import shutil
import subprocess
from string import Template
from distutils import file_util
from distutils import dir_util
__dir__ = os.path.split(__file__)[0]
if __dir__: os.chdir(__dir__)
# --------------------------------------------------------------------------------------------------
# Install Client ?
# --------------------------------------------------------------------------------------------------
def userHome(user):
return subprocess.check_output('echo ~' + user, shell=True).strip()
try:
user_name = os.getenv('USER')
if user_name=='root':
user_name = os.getenv('SUDO_USER')
if not user_name:
print('Cannot guess the Username! Exiting!\n')
exit(1)
except:
print('Cannot guess the Username! Exiting!\n')
exit(1)
# --------------------------------------------------------------------------------------------------
# Previous installations of Twister
# --------------------------------------------------------------------------------------------------
# Twister client path
INSTALL_PATH = userHome(user_name) + os.sep + 'twister/'
cfg_path = INSTALL_PATH + 'config/'
tmp_config = ''
print('Hello `{}` !\n'.format(user_name))
if os.path.exists(INSTALL_PATH):
print('WARNING! Another version of Twister is installed at `%s`!' % INSTALL_PATH)
print('If you continue, all files from that folder will be PERMANENTLY DELETED,')
print('Only the `config` folder will be saved!')
selected = raw_input('Are you sure you want to continue? (yes/no): ')
if selected.strip().lower() in ['y', 'yes']:
# Backup CONFIG folder for client
if os.path.isdir(cfg_path):
if os.getuid() != 0: # Normal user
tmp_config = userHome(user_name) + '/.twister/'
else: # ROOT user
tmp_config = '/tmp/twister_client_config/'
# Remove old tmp config
if os.path.isdir(tmp_config):
shutil.rmtree(tmp_config)
print('\nBack-up config folder (from `{}` to `{}`)...'.format(cfg_path, tmp_config))
try:
shutil.move(cfg_path, tmp_config)
except Exception as e:
                print('\nInsufficient rights to move the config folder `{}`!\n'
'The installation cannot continue if you don\'t have permissions to move that folder!\n'.format(cfg_path))
exit(1)
# Deleting previous versions of Twister
try: dir_util.remove_tree(INSTALL_PATH)
except:
print('Error! Cannot delete Twister dir `{}` !'.format(INSTALL_PATH))
try: os.mkdir(INSTALL_PATH)
except:
print('Error! Cannot create Twister dir `{}` !'.format(INSTALL_PATH))
print('You probably don\'t have enough privileges to read and write in `{}` !\n'.format(INSTALL_PATH))
exit(1)
else:
print('\nPlease backup your data, then restart the installer.')
print('Exiting.\n')
exit(0)
# --------------------------------------------------------------------------------------------------
# Start copying files
# --------------------------------------------------------------------------------------------------
# Files to move in Client folder
to_copy = [
'bin/cli.py',
'bin/start_client',
'bin/start_client.py',
'bin/start_packet_sniffer.py',
'doc/',
'demo/',
'config/',
'client/',
'services/PacketSniffer/',
'services/__init__.py',
'common/__init__.py',
'common/constants.py',
'common/suitesmanager.py',
'common/configobj.py',
'common/jython/',
]
ROOT_FOLDER = os.sep.join( os.getcwd().split(os.sep)[:-1] )
cwd_path = os.getcwd() + os.sep
pkg_path = cwd_path + 'packages/'
print('')
for fname in to_copy:
fpath = ROOT_FOLDER + os.sep + fname
dpath = os.path.dirname(fname)
if dpath and ( not os.path.exists(INSTALL_PATH+dpath) ):
try:
dir_util.mkpath(INSTALL_PATH + dpath)
print('Created folder structure `%s`.' % (INSTALL_PATH+dpath))
except:
print('Cannot create folder `%s`!' % (INSTALL_PATH+dpath))
if os.path.isdir(fpath):
try:
dir_util.copy_tree(fpath, INSTALL_PATH + dpath)
print('Copied dir `%s` to `%s`.' % (fpath, INSTALL_PATH+dpath))
except:
print('Cannot copy dir `%s` to `%s`!' % (fpath, INSTALL_PATH+dpath))
elif os.path.isfile(fpath):
try:
file_util.copy_file(fpath, INSTALL_PATH + dpath)
print('Copied file `%s` to `%s`.' % (fpath, INSTALL_PATH+dpath))
except:
print('Cannot copy file `%s` to `%s`!' % (fpath, INSTALL_PATH+dpath))
else:
print('Path `{}` does not exist and will not be copied!'.format(fpath))
# Create cache and logs folders
try: os.mkdir(INSTALL_PATH + '/.twister_cache')
except: pass
try: os.mkdir(INSTALL_PATH + '/logs')
except: pass
try: os.mkdir(INSTALL_PATH + '/config/sut')
except: pass
try: os.mkdir(INSTALL_PATH + '/config/predefined')
except: pass
try: os.mkdir(INSTALL_PATH + '/config/test_config')
except: pass
# Delete Server config files...
try: os.remove(INSTALL_PATH +os.sep+ 'config/resources.json')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/services.ini')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/server_init.ini')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/users_and_groups.ini')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/shared_db.xml')
except: pass
# Restore CONFIG folder, if any
if os.path.exists(tmp_config):
print('\nMoving `config` folder back (from `{}` to `{}`)...'.format(tmp_config, cfg_path))
for xname in os.listdir(tmp_config):
src_name = tmp_config + xname
dst_name = cfg_path + xname
if os.path.isfile(dst_name):
os.remove(dst_name)
elif os.path.isdir(dst_name):
shutil.rmtree(dst_name)
print('Restoring config `{}`.'.format(dst_name))
shutil.move(src_name, cfg_path)
# Change owner for install folder...
if os.getuid() == 0:
tcr_proc = subprocess.Popen(['chown', user_name+':'+user_name, INSTALL_PATH, '-R'],)
tcr_proc.wait()
tcr_proc = subprocess.Popen(['chmod', '775', INSTALL_PATH, '-R'],)
tcr_proc.wait()
try:
tcr_proc = subprocess.Popen(['chmod', '777', INSTALL_PATH +os.sep+ 'logs', '-R'],)
tcr_proc.wait()
except:
print('Cannot CHMOD 777 the logs folder!')
for ext in ['txt', 'xml', 'py', 'tcl', 'plx', 'json', 'ini', 'htm', 'js', 'css']:
os.system('find %s -name "*.%s" -exec chmod 664 {} \;' % (INSTALL_PATH, ext))
# Make executables
os.system('find %s -name "cli.py" -exec chmod +x {} \;' % INSTALL_PATH)
os.system('find %s -name "start_client" -exec chmod +x {} \;' % INSTALL_PATH)
# Fix FWM Config XML
fwm = Template( open(INSTALL_PATH + 'config/fwmconfig.xml', 'r').read() )
open(INSTALL_PATH + 'config/fwmconfig.xml', 'w').write( fwm.substitute(HOME=userHome(user_name)) )
del fwm
# Check user's encr key
user_key = '{}config/twister.key'.format(INSTALL_PATH)
if os.path.isfile(user_key) and open(user_key).read():
print('User key ok.')
else:
print('Generating new user key...')
with open(user_key, 'w') as f:
f.write(binascii.hexlify(os.urandom(16)))
print('User key saved in "config/twister.key". Don\'t change this file!')
print('\nTwister installation done!\n')
| apache-2.0 | -6,713,722,362,416,530,000 | 32.410156 | 128 | 0.584707 | false |
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/utilities/fit.py | 1 | 2484 | """
``revscoring fit -h``
::
Fits a dependent (an extractable value like a Datasource or Feature) to
observed data. These are often used along with bag-of-words
methods to reduce the feature space prior to training and testing a model
or to train a sub-model.
Usage:
fit -h | --help
fit <dependent> <label>
[--input=<path>]
[--datasource-file=<path>]
[--debug]
Options:
-h --help Prints this documentation
<dependent> The classpath to `Dependent`
that can be fit to observations
<label> The label that should be predicted
--input=<path> Path to a file containing observations
[default: <stdin>]
        --datasource-file=<path>   Path to a file for writing out the trained
datasource [default: <stdout>]
--debug Print debug logging.
"""
import logging
import sys
import docopt
import yamlconf
from ..dependencies import solve
from .util import read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
dependent = yamlconf.import_path(args['<dependent>'])
label_name = args['<label>']
if args['--input'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--input']))
logger.info("Reading observations...")
value_labels = [
(list(solve(dependent.dependencies, cache=ob['cache'])),
ob[label_name])
for ob in observations]
logger.debug(" -- {0} observations gathered".format(len(value_labels)))
if args['--datasource-file'] == "<stdout>":
datasource_f = sys.stdout
else:
datasource_f = open(args['--datasource-file'], 'w')
debug = args['--debug']
run(dependent, label_name, value_labels, datasource_f, debug)
def run(dependent, label_name, value_labels, datasource_f, debug):
logger.info("Fitting {0} ({1})".format(dependent, type(dependent)))
dependent.fit(value_labels)
logger.info("Writing fitted selector to {0}".format(datasource_f))
dependent.dump(datasource_f)
| mit | -7,085,234,002,436,981,000 | 30.846154 | 77 | 0.593398 | false |
stensonowen/spim-grader | spim-grader.py | 2 | 3172 | #!/usr/bin/python
'''
SPIM Auto-grader
Owen Stenson
Grades every file in the 'submissions' folder using every test in the 'samples' folder.
Writes to 'results' folder.
'''
import os, time, re
from subprocess import Popen, PIPE, STDOUT
def run(fn, sample_input='\n'):
#start process and write input
proc = Popen(["spim", "-file", "submissions/"+fn], stdin=PIPE, stdout=PIPE, stderr=PIPE)
if sample_input[-1:] != '\n':
print "Warning: last line (of file below) must end with newline char to be submitted. Assuming it should..."
sample_input = sample_input + '\n'
proc.stdin.write(sample_input)
return proc
def grade(p, f):
    #args: p = process running the homework file, f = name of the results file to write
print "Writing to ", f
f = open("results/" + f, 'w')
time.sleep(.1)
if p.poll() is None:
#process is either hanging or being slow
time.sleep(5)
if p.poll() is None:
p.kill()
f.write("Process hung; no results to report\n")
f.close()
return
output = p.stdout.read()
#remove output header
hdrs = []
hdrs.append(re.compile("SPIM Version .* of .*\n"))
hdrs.append(re.compile("Copyright .*, James R. Larus.\n"))
hdrs.append(re.compile("All Rights Reserved.\n"))
hdrs.append(re.compile("See the file README for a full copyright notice.\n"))
hdrs.append(re.compile("Loaded: .*/spim/.*\n"))
for hdr in hdrs:
output = re.sub(hdr, "", output)
errors = p.stderr.read()
if errors == "":
f.write("\t**PROCESS COMPLETED**\n")
f.write(output + '\n'*2)
else:
f.write("\t**PROCESS FAILED TO COMPILE**\n")
f.write(output + '\n' + errors + '\n'*2)
f.close()
def generate_filename(submission, sample):
#extract RCS id from submission title
try:
rcs_start = submission.index('_') + 1
rcs_end = min(submission.index('attempt'), submission.index('.')) - 1
rcs = submission[rcs_start:rcs_end]
except:
rcs = submission
return rcs + '__' + sample
def main():
#no use in running if content directories aren't present
assert os.path.isdir("samples")
assert os.path.isdir("submissions")
if os.path.isdir("results") is False:
assert os.path.isfile("results") == False
os.makedirs("results")
#cycle through files to grade:
for submission in os.listdir('submissions'):
#cycle through samples to test (ignore .example):
for sample in os.listdir('samples'):
#ignore example files
if submission == ".example" or sample == ".example":
continue
sample_file = open('samples/'+sample, 'r')
#read sample input; fix windows EOL char
sample_input = sample_file.read()
sample_input = sample_input.replace('\r', '')
#create process
p = run(submission, sample_input)
output_file = generate_filename(submission, sample)
grade(p, output_file)
if __name__ == "__main__":
main()
| gpl-2.0 | 1,135,793,950,941,566,300 | 34.45977 | 116 | 0.57913 | false |
FCP-INDI/C-PAC | CPAC/utils/tests/test_datasource.py | 1 | 2537 |
import os
import json
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.utility as util
from CPAC.utils.test_resources import setup_test_wf
from CPAC.utils.datasource import match_epi_fmaps
def test_match_epi_fmaps():
# good data to use
s3_prefix = "s3://fcp-indi/data/Projects/HBN/MRI/Site-CBIC/sub-NDARAB708LM5"
s3_paths = [
"func/sub-NDARAB708LM5_task-rest_run-1_bold.json",
"fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.nii.gz",
"fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.json",
"fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.nii.gz",
"fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.json"
]
wf, ds, local_paths = setup_test_wf(s3_prefix, s3_paths,
"test_match_epi_fmaps")
opposite_pe_json = local_paths["fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.json"]
same_pe_json = local_paths["fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.json"]
func_json = local_paths["func/sub-NDARAB708LM5_task-rest_run-1_bold.json"]
with open(opposite_pe_json, "r") as f:
opposite_pe_params = json.load(f)
with open(same_pe_json, "r") as f:
same_pe_params = json.load(f)
with open(func_json, "r") as f:
func_params = json.load(f)
bold_pedir = func_params["PhaseEncodingDirection"]
fmap_paths_dct = {"epi_PA":
{"scan": local_paths["fmap/sub-NDARAB708LM5_dir-PA_acq-fMRI_epi.nii.gz"],
"scan_parameters": opposite_pe_params},
"epi_AP":
{"scan": local_paths["fmap/sub-NDARAB708LM5_dir-AP_acq-fMRI_epi.nii.gz"],
"scan_parameters": same_pe_params}
}
match_fmaps = \
pe.Node(util.Function(input_names=['fmap_dct',
'bold_pedir'],
output_names=['opposite_pe_epi',
'same_pe_epi'],
function=match_epi_fmaps,
as_module=True),
name='match_epi_fmaps')
match_fmaps.inputs.fmap_dct = fmap_paths_dct
match_fmaps.inputs.bold_pedir = bold_pedir
ds.inputs.func_json = func_json
ds.inputs.opposite_pe_json = opposite_pe_json
ds.inputs.same_pe_json = same_pe_json
wf.connect(match_fmaps, 'opposite_pe_epi', ds, 'should_be_dir-PA')
wf.connect(match_fmaps, 'same_pe_epi', ds, 'should_be_dir-AP')
wf.run()
| bsd-3-clause | -464,176,579,860,499,840 | 37.439394 | 99 | 0.573906 | false |
elishowk/flaskexperiment | commonecouteserver/data/__init__.py | 1 | 6483 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 CommOnEcoute http://commonecoute.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>
from flask import abort
import riak
import uuid
from datetime import datetime
import os
DB_HOST = os.environ.get('COESERVER_DB_HOST') or '127.0.0.1'
DB_PORT = os.environ.get('COESERVER_DB_PORT') or 8087
DB_PORT = int(DB_PORT)
import logging
logger = logging.getLogger('coeserver')
class ObjectExistsException(Exception):
pass
class GenericBucket(object):
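    """Thin CRUD wrapper around a single Riak bucket (protobuf transport)."""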
def __init__(self, bucketname, port=DB_PORT, host=DB_HOST):
"""
initiate a riak bucket
"""
self.bucketname = bucketname
self._connect(bucketname, port, host)
def _connect(self, bucketname, port, host):
"""
Connects to a particular bucket
on the defaut port of riak protobuf interface
"""
#print "connecting to %s on port %d"%(host, port)
self.client = riak.RiakClient(host=host, port=port, transport_class=riak.RiakPbcTransport)
#self.client.set_r(1)
#self.client.set_w(1)
self.bucket = self.client.bucket(bucketname)
def _encode(self, data):
"""
on the fly encoding
"""
encodeddata = {}
for (key, value) in data.iteritems():
if isinstance(value, unicode):
encodeddata[key] = value.encode('utf-8', 'replace')
else:
encodeddata[key] = value
return encodeddata
def _addLinks(self, object, links):
"""
add links to an object given a list of identifiers
"""
for linked_key in links:
linked_object = self.bucket.get(linked_key)
object.add_link(linked_object)
linked_object.add_link(object)
def _genID(self, data):
return "%s:::%s"%(datetime.utcnow().isoformat(), uuid.uuid4())
def _getNewObject(self, data):
if self.bucket.get(data['id_txt']).exists():
raise(ObjectExistsException())
else:
encodeddata = self._encode(data)
return self.bucket.new(encodeddata['id_txt'], encodeddata)
def create(self, data, links=[]):
"""
        Stores 'data' under its 'id_txt' key, generating one if it is missing
The 'data' can be any data Python's 'json' encoder can handle (except unicode values with protobuf)
Returns the json object created
"""
if not self.client.is_alive():
return {'response': {"error": "database is dead"}, 'statuscode': 500}
try:
if 'id_txt' not in data:
data['id_txt'] = self._genID(data)
new_object = self._getNewObject(data)
# eventually links to other objects
self._addLinks(new_object, links)
# Save the object to Riak.
return {'response':new_object.store().get_data()}
#return new_object.get_key()
except ObjectExistsException, existsexc:
return {'response': {"error": "record already exists"}, 'statuscode': 400}
def read(self, key):
"""
Returns json object for a given key
"""
if isinstance(key, unicode):
key = key.encode('utf-8', 'replace')
response = self.bucket.get(key).get_data()
if response is None:
abort(404)
return {'response': response }
def update(self, key, update_data, links=[]):
"""
        Gets and updates an item in the database
Returns the updated json object
"""
if isinstance(key, unicode):
key = key.encode('utf-8', 'replace')
update_object = self.bucket.get(key)
if not update_object.exists():
abort(404)
data = update_object.get_data()
data.update(update_data)
update_object.set_data(self._encode(data))
# eventually links to other objects
self._addLinks(update_object, links)
return {'response': update_object.get_data()} or {'response': {"error": "could not update record"}, 'statuscode': 404}
def delete(self, key):
"""
Deletes a record
"""
if isinstance(key, unicode):
key = key.encode('utf-8', 'replace')
response = self.bucket.get(key)
if not response.exists():
abort(404)
else:
response.delete()
def readallkeys(self):
return {'response': self.bucket.get_keys()}
class Track(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "track", *args, **kwargs)
def _genID(self, data):
return "%s:::%s:::%s"%(data['start_date'], data['end_date'], uuid.uuid4())
class Event(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "event", *args, **kwargs)
def _genID(self, data):
return "%s:::%s:::%s"%(data['start_date'], data['end_date'], uuid.uuid4())
class User(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "user", *args, **kwargs)
def _genID(self, data):
return data['email_txt']
class Post(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "post", *args, **kwargs)
class Product(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "product", *args, **kwargs)
def _genID(self, data):
return "%s"%uuid.uuid4()
class Genre(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "genre", *args, **kwargs)
def _genID(self, data):
return "%s"%uuid.uuid4()
class Artist(GenericBucket):
def __init__(self, *args, **kwargs):
GenericBucket.__init__(self, "artist", *args, **kwargs)
def _genID(self, data):
return "%s"%uuid.uuid4()
| agpl-3.0 | 1,303,229,853,120,405,800 | 31.742424 | 126 | 0.586303 | false |
alexherns/biotite-scripts | cluster_coverage.py | 1 | 2808 | #!/usr/bin/env python2.7
import sys, operator, argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='''Prints out the coverage values for each cluster, by sample and total.
Also lists number of hits in each cluster.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False,
epilog= '''TSV of features and as downloaded from ggkbase.
Scaffold_gene is in column 2.
Coverage value is in column 5.
Clusters file as generated from USEARCH
''')
#Required arguments
required = parser.add_argument_group('REQUIRED')
required.add_argument('-c', help= 'clusters.uc', required=True, type=str)
required.add_argument('-t', help= 'features.tsv', required=True, type=str)
#Optional arguments
optional = parser.add_argument_group('OPTIONAL')
optional.add_argument('-h', action="help", help="show this help message and exit")
args = parser.parse_args()
cluster_file= args.c
tsv_file= args.t
#Create a dictionary of feature:coverage values
#Read in the tsv of features
handle= open(tsv_file, "r")
feat2cov= {}
samples= []
for line in handle:
contig_features= line.strip().split("\t")
samples.append(contig_features[1].split("_scaffold")[0])
feature, coverage= contig_features[1], contig_features[4]
feat2cov[feature]= float(coverage)
samples= list(set(samples))
handle.close()
#Select all non-redundant cluster lines from file
clusters= [line.strip().split("\t") for line in open(cluster_file) if line[0] in ["H", "C"]]
#Extract unique list of all clusters
cluster_names= list(set([line[1] for line in clusters]))
#Dictionary of clusters:
# clust_dict[cluster_name: [clust1, ..., clustN]]
clust_dict= {}
for cluster in clusters:
if cluster[1] not in clust_dict:
clust_dict[cluster[1]]= []
clust_dict[cluster[1]].append(cluster)
#List to contain output lines
cov_list= []
for cluster in clust_dict:
#Each line in output, formatted as list
clustercov= [cluster]+[0]*(len(samples)+3)
for line in clust_dict[cluster]:
scaf= line[8]
#Append centroids
if line[0]=="C":
clustercov.append(scaf)
sample= scaf.split("_scaffold")[0]
if sample not in samples:
print "FAIL: SCAF", scaf
else:
clustercov[samples.index(sample)+1]+=feat2cov[scaf.split(" ")[0]]
#Number of samples with positive hits
clustercov[-2]= len([i for i in clustercov[1:-4] if i > 0])
#Number of hits
clustercov[-3]= len(clust_dict[cluster])
#Total (raw and not normalized) cluster coverage value
clustercov[-4]= sum(clustercov[1:-4])
cov_list.append(clustercov)
#Print header line
print "TAX\t"+"\t".join(samples)+"\tTotal\t#Hits\t#Samples\tCentroid"
#Print each line in output
print "\n".join(["\t".join([str(i) for i in row]) for row in cov_list])
| mit | 7,550,752,532,490,339,000 | 32.428571 | 118 | 0.691595 | false |
mackong/gitql | prettytable/prettytable.py | 1 | 54214 | #!/usr/bin/env python
#
# Copyright (c) 2009-2013, Luke Maurits <[email protected]>
# All rights reserved.
# With contributions from:
# * Chris Clark
# * Klein Stephane
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "0.7.2"
import copy
import csv
import random
import re
import sys
import textwrap
import itertools
import unicodedata
py3k = sys.version_info[0] >= 3
if py3k:
unicode = str
basestring = str
itermap = map
iterzip = zip
uni_chr = chr
from html.parser import HTMLParser
else:
itermap = itertools.imap
iterzip = itertools.izip
uni_chr = unichr
from HTMLParser import HTMLParser
if py3k and sys.version_info[1] >= 2:
from html import escape
else:
from cgi import escape
# hrule styles
FRAME = 0
ALL = 1
NONE = 2
HEADER = 3
# Table styles
DEFAULT = 10
MSWORD_FRIENDLY = 11
PLAIN_COLUMNS = 12
RANDOM = 20
_re = re.compile("\033\[[0-9;]*m")
def _get_size(text):
lines = text.split("\n")
height = len(lines)
width = max([_str_block_width(line) for line in lines])
return (width, height)
class PrettyTable(object):
def __init__(self, field_names=None, **kwargs):
"""Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
header - print a header showing field names (True or False)
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
valign - default valign for each row (None, "t", "m" or "b")
reversesort - True or False to sort in descending or ascending order"""
self.encoding = kwargs.get("encoding", "UTF-8")
# Data
self._field_names = []
self._align = {}
self._valign = {}
self._max_width = {}
self._rows = []
if field_names:
self.field_names = field_names
else:
self._widths = []
# Options
self._options = "start end fields header border sortby reversesort sort_key attributes format hrules vrules".split()
self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split())
self._options.extend("vertical_char horizontal_char junction_char header_style valign xhtml print_empty".split())
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
else:
kwargs[option] = None
self._start = kwargs["start"] or 0
self._end = kwargs["end"] or None
self._fields = kwargs["fields"] or None
if kwargs["header"] in (True, False):
self._header = kwargs["header"]
else:
self._header = True
self._header_style = kwargs["header_style"] or None
if kwargs["border"] in (True, False):
self._border = kwargs["border"]
else:
self._border = True
self._hrules = kwargs["hrules"] or FRAME
self._vrules = kwargs["vrules"] or ALL
self._sortby = kwargs["sortby"] or None
if kwargs["reversesort"] in (True, False):
self._reversesort = kwargs["reversesort"]
else:
self._reversesort = False
self._sort_key = kwargs["sort_key"] or (lambda x: x)
self._int_format = kwargs["int_format"] or {}
self._float_format = kwargs["float_format"] or {}
self._padding_width = kwargs["padding_width"] or 1
self._left_padding_width = kwargs["left_padding_width"] or None
self._right_padding_width = kwargs["right_padding_width"] or None
self._vertical_char = kwargs["vertical_char"] or self._unicode("|")
self._horizontal_char = kwargs["horizontal_char"] or self._unicode("-")
self._junction_char = kwargs["junction_char"] or self._unicode("+")
if kwargs["print_empty"] in (True, False):
self._print_empty = kwargs["print_empty"]
else:
self._print_empty = True
self._format = kwargs["format"] or False
self._xhtml = kwargs["xhtml"] or False
self._attributes = kwargs["attributes"] or {}
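# Illustrative usage sketch (not part of the original module): constructing a
# table with several of the keyword options documented above. Field names and
# values are made up for the example.
#
#     x = PrettyTable(["City", "Population"], sortby="Population",
#                     reversesort=True, border=True, hrules=ALL,
#                     padding_width=2)
#     x.add_row(["Oslo", 634293])
#     x.add_row(["Bergen", 271949])
#     print(x.get_string())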
def _unicode(self, value):
if not isinstance(value, basestring):
value = str(value)
if not isinstance(value, unicode):
value = unicode(value, self.encoding, "strict")
return value
def _justify(self, text, width, align):
excess = width - _str_block_width(text)
if align == "l":
return text + excess * " "
elif align == "r":
return excess * " " + text
else:
if excess % 2:
# Uneven padding
# Put more space on right if text is of odd length...
if _str_block_width(text) % 2:
return (excess//2)*" " + text + (excess//2 + 1)*" "
# and more space on left if text is of even length
else:
return (excess//2 + 1)*" " + text + (excess//2)*" "
# Why distribute extra space this way? To match the behaviour of
# the inbuilt str.center() method.
else:
# Equal padding on either side
return (excess//2)*" " + text + (excess//2)*" "
def __getattr__(self, name):
if name == "rowcount":
return len(self._rows)
elif name == "colcount":
if self._field_names:
return len(self._field_names)
elif self._rows:
return len(self._rows[0])
else:
return 0
else:
raise AttributeError(name)
def __getitem__(self, index):
new = PrettyTable()
new.field_names = self.field_names
for attr in self._options:
setattr(new, "_"+attr, getattr(self, "_"+attr))
setattr(new, "_align", getattr(self, "_align"))
if isinstance(index, slice):
for row in self._rows[index]:
new.add_row(row)
elif isinstance(index, int):
new.add_row(self._rows[index])
else:
raise Exception("Index %s is invalid, must be an integer or slice" % str(index))
return new
if py3k:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode(self.encoding)
def __unicode__(self):
return self.get_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base to validate options.
# It will call the appropriate validation method for that option. The individual validation methods should
# never need to be called directly (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
# Firstly, in the property setters defined in the ATTRIBUTE MANAGEMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
def _validate_option(self, option, val):
if option in ("field_names",):
self._validate_field_names(val)
elif option in ("start", "end", "max_width", "padding_width", "left_padding_width", "right_padding_width", "format"):
self._validate_nonnegative_int(option, val)
elif option in ("sortby",):
self._validate_field_name(option, val)
elif option in ("sort_key",):
self._validate_function(option, val)
elif option in ("hrules",):
self._validate_hrules(option, val)
elif option in ("vrules",):
self._validate_vrules(option, val)
elif option in ("fields",):
self._validate_all_field_names(option, val)
elif option in ("header", "border", "reversesort", "xhtml", "print_empty"):
self._validate_true_or_false(option, val)
elif option in ("header_style",):
self._validate_header_style(val)
elif option in ("int_format",):
self._validate_int_format(option, val)
elif option in ("float_format",):
self._validate_float_format(option, val)
elif option in ("vertical_char", "horizontal_char", "junction_char"):
self._validate_single_char(option, val)
elif option in ("attributes",):
self._validate_attributes(option, val)
else:
raise Exception("Unrecognised option: %s!" % option)
def _validate_field_names(self, val):
# Check for appropriate length
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (len(val), len(self._rows[0])))
# # Check for uniqueness
# try:
# assert len(val) == len(set(val))
# except AssertionError:
# raise Exception("Field names must be unique!")
def _validate_header_style(self, val):
try:
assert val in ("cap", "title", "upper", "lower", None)
except AssertionError:
raise Exception("Invalid header style, use cap, title, upper, lower or None!")
def _validate_align(self, val):
try:
assert val in ["l","c","r"]
except AssertionError:
raise Exception("Alignment %s is invalid, use l, c or r!" % val)
def _validate_valign(self, val):
try:
assert val in ["t","m","b",None]
except AssertionError:
raise Exception("Alignment %s is invalid, use t, m, b or None!" % val)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
raise Exception("Invalid value for %s: %s!" % (name, self._unicode(val)))
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
raise Exception("Invalid value for %s! Must be True or False." % name)
def _validate_int_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be an integer format string." % name)
def _validate_float_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert "." in val
bits = val.split(".")
assert len(bits) <= 2
assert bits[0] == "" or bits[0].isdigit()
assert bits[1] == "" or bits[1].isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be a float format string." % name)
def _validate_function(self, name, val):
try:
assert hasattr(val, "__call__")
except AssertionError:
raise Exception("Invalid value for %s! Must be a function." % name)
def _validate_hrules(self, name, val):
try:
assert val in (ALL, FRAME, HEADER, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME, HEADER or NONE." % name)
def _validate_vrules(self, name, val):
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME, or NONE." % name)
def _validate_field_name(self, name, val):
try:
assert (val in self._field_names) or (val is None)
except AssertionError:
raise Exception("Invalid field name: %s!" % val)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
raise Exception("fields must be a sequence of field names!")
def _validate_single_char(self, name, val):
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception("Invalid value for %s! Must be a string of length 1." % name)
def _validate_attributes(self, name, val):
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception("attributes must be a dictionary of name/value pairs!")
##############################
# ATTRIBUTE MANAGEMENT #
##############################
def _get_field_names(self):
return self._field_names
"""The names of the fields
Arguments:
fields - list or tuple of field names"""
def _set_field_names(self, val):
val = [self._unicode(x) for x in val]
self._validate_option("field_names", val)
if self._field_names:
old_names = self._field_names[:]
self._field_names = val
if self._align and old_names:
for old_name, new_name in zip(old_names, val):
self._align[new_name] = self._align[old_name]
for old_name in old_names:
if old_name not in val:
self._align.pop(old_name)
else:
for field in self._field_names:
self._align[field] = "c"
if self._valign and old_names:
for old_name, new_name in zip(old_names, val):
self._valign[new_name] = self._valign[old_name]
for old_name in old_names:
if old_name not in val:
self._valign.pop(old_name)
else:
for field in self._field_names:
self._valign[field] = "t"
field_names = property(_get_field_names, _set_field_names)
def _get_align(self):
return self._align
def _set_align(self, val):
self._validate_align(val)
for field in self._field_names:
self._align[field] = val
align = property(_get_align, _set_align)
def _get_valign(self):
return self._valign
def _set_valign(self, val):
self._validate_valign(val)
for field in self._field_names:
self._valign[field] = val
valign = property(_get_valign, _set_valign)
def _get_max_width(self):
return self._max_width
def _set_max_width(self, val):
self._validate_option("max_width", val)
for field in self._field_names:
self._max_width[field] = val
max_width = property(_get_max_width, _set_max_width)
def _get_fields(self):
"""List or tuple of field names to include in displays
Arguments:
fields - list or tuple of field names to include in displays"""
return self._fields
def _set_fields(self, val):
self._validate_option("fields", val)
self._fields = val
fields = property(_get_fields, _set_fields)
def _get_start(self):
"""Start index of the range of rows to print
Arguments:
start - index of first data row to include in output"""
return self._start
def _set_start(self, val):
self._validate_option("start", val)
self._start = val
start = property(_get_start, _set_start)
def _get_end(self):
"""End index of the range of rows to print
Arguments:
end - index of last data row to include in output PLUS ONE (list slice style)"""
return self._end
def _set_end(self, val):
self._validate_option("end", val)
self._end = val
end = property(_get_end, _set_end)
def _get_sortby(self):
"""Name of field by which to sort rows
Arguments:
sortby - field name to sort by"""
return self._sortby
def _set_sortby(self, val):
self._validate_option("sortby", val)
self._sortby = val
sortby = property(_get_sortby, _set_sortby)
def _get_reversesort(self):
"""Controls direction of sorting (ascending vs descending)
Arguments:
reversesort - set to True to sort by descending order, or False to sort by ascending order"""
return self._reversesort
def _set_reversesort(self, val):
self._validate_option("reversesort", val)
self._reversesort = val
reversesort = property(_get_reversesort, _set_reversesort)
def _get_sort_key(self):
"""Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted"""
return self._sort_key
def _set_sort_key(self, val):
self._validate_option("sort_key", val)
self._sort_key = val
sort_key = property(_get_sort_key, _set_sort_key)
def _get_header(self):
"""Controls printing of table header with field names
Arguments:
header - print a header showing field names (True or False)"""
return self._header
def _set_header(self, val):
self._validate_option("header", val)
self._header = val
header = property(_get_header, _set_header)
def _get_header_style(self):
"""Controls stylisation applied to field names in header
Arguments:
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)"""
return self._header_style
def _set_header_style(self, val):
self._validate_header_style(val)
self._header_style = val
header_style = property(_get_header_style, _set_header_style)
def _get_border(self):
"""Controls printing of border around table
Arguments:
border - print a border around the table (True or False)"""
return self._border
def _set_border(self, val):
self._validate_option("border", val)
self._border = val
border = property(_get_border, _set_border)
def _get_hrules(self):
"""Controls printing of horizontal rules after rows
Arguments:
hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE"""
return self._hrules
def _set_hrules(self, val):
self._validate_option("hrules", val)
self._hrules = val
hrules = property(_get_hrules, _set_hrules)
def _get_vrules(self):
"""Controls printing of vertical rules between columns
Arguments:
vrules - vertical rules style. Allowed values: FRAME, ALL, NONE"""
return self._vrules
def _set_vrules(self, val):
self._validate_option("vrules", val)
self._vrules = val
vrules = property(_get_vrules, _set_vrules)
def _get_int_format(self):
"""Controls formatting of integer data
Arguments:
int_format - integer format string"""
return self._int_format
def _set_int_format(self, val):
# self._validate_option("int_format", val)
for field in self._field_names:
self._int_format[field] = val
int_format = property(_get_int_format, _set_int_format)
def _get_float_format(self):
"""Controls formatting of floating point data
Arguments:
float_format - floating point format string"""
return self._float_format
def _set_float_format(self, val):
# self._validate_option("float_format", val)
for field in self._field_names:
self._float_format[field] = val
float_format = property(_get_float_format, _set_float_format)
def _get_padding_width(self):
"""The number of empty spaces between a column's edge and its content
Arguments:
padding_width - number of spaces, must be a positive integer"""
return self._padding_width
def _set_padding_width(self, val):
self._validate_option("padding_width", val)
self._padding_width = val
padding_width = property(_get_padding_width, _set_padding_width)
def _get_left_padding_width(self):
"""The number of empty spaces between a column's left edge and its content
Arguments:
left_padding - number of spaces, must be a positive integer"""
return self._left_padding_width
def _set_left_padding_width(self, val):
self._validate_option("left_padding_width", val)
self._left_padding_width = val
left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
def _get_right_padding_width(self):
"""The number of empty spaces between a column's right edge and its content
Arguments:
right_padding - number of spaces, must be a positive integer"""
return self._right_padding_width
def _set_right_padding_width(self, val):
self._validate_option("right_padding_width", val)
self._right_padding_width = val
right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
def _get_vertical_char(self):
"""The charcter used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines"""
return self._vertical_char
def _set_vertical_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._vertical_char = val
vertical_char = property(_get_vertical_char, _set_vertical_char)
def _get_horizontal_char(self):
"""The charcter used when printing table borders to draw horizontal lines
Arguments:
horizontal_char - single character string used to draw horizontal lines"""
return self._horizontal_char
def _set_horizontal_char(self, val):
val = self._unicode(val)
self._validate_option("horizontal_char", val)
self._horizontal_char = val
horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
def _get_junction_char(self):
"""The charcter used when printing table borders to draw line junctions
Arguments:
junction_char - single character string used to draw line junctions"""
return self._junction_char
def _set_junction_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._junction_char = val
junction_char = property(_get_junction_char, _set_junction_char)
def _get_format(self):
"""Controls whether or not HTML tables are formatted to match styling options
Arguments:
format - True or False"""
return self._format
def _set_format(self, val):
self._validate_option("format", val)
self._format = val
format = property(_get_format, _set_format)
def _get_print_empty(self):
"""Controls whether or not empty tables produce a header and frame or just an empty string
Arguments:
print_empty - True or False"""
return self._print_empty
def _set_print_empty(self, val):
self._validate_option("print_empty", val)
self._print_empty = val
print_empty = property(_get_print_empty, _set_print_empty)
def _get_attributes(self):
"""A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML
Arguments:
attributes - dictionary of attributes"""
return self._attributes
def _set_attributes(self, val):
self._validate_option("attributes", val)
self._attributes = val
attributes = property(_get_attributes, _set_attributes)
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs):
options = {}
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
options[option] = kwargs[option]
else:
options[option] = getattr(self, "_"+option)
return options
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style):
if style == DEFAULT:
self._set_default_style()
elif style == MSWORD_FRIENDLY:
self._set_msword_style()
elif style == PLAIN_COLUMNS:
self._set_columns_style()
elif style == RANDOM:
self._set_random_style()
else:
raise Exception("Invalid pre-set style!")
def _set_default_style(self):
self.header = True
self.border = True
self._hrules = FRAME
self._vrules = ALL
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
self.horizontal_char = "-"
self.junction_char = "+"
def _set_msword_style(self):
self.header = True
self.border = True
self._hrules = NONE
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
def _set_columns_style(self):
self.header = True
self.border = False
self.padding_width = 1
self.left_padding_width = 0
self.right_padding_width = 8
def _set_random_style(self):
# Just for fun!
self.header = random.choice((True, False))
self.border = random.choice((True, False))
self._hrules = random.choice((ALL, FRAME, HEADER, NONE))
self._vrules = random.choice((ALL, FRAME, NONE))
self.left_padding_width = random.randint(0,5)
self.right_padding_width = random.randint(0,5)
self.vertical_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
self.horizontal_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
self.junction_char = random.choice("~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
##############################
# DATA INPUT METHODS #
##############################
def add_row(self, row):
"""Add a row to the table
Arguments:
row - row of data, should be a list with as many elements as the table
has fields"""
if self._field_names and len(row) != len(self._field_names):
raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names)))
if not self._field_names:
self.field_names = [("Field %d" % (n+1)) for n in range(0,len(row))]
self._rows.append(list(row))
def del_row(self, row_index):
"""Delete a row to the table
Arguments:
row_index - The index of the row you want to delete. Indexing starts at 0."""
if row_index > len(self._rows)-1:
raise Exception("Cant delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
del self._rows[row_index]
def add_column(self, fieldname, column, align="c", valign="t"):
"""Add a column to the table.
Arguments:
fieldname - name of the field to contain the new column of data
column - column of data, should be a list with as many elements as the
table has rows
align - desired alignment for this column - "l" for left, "c" for centre and "r" for right
valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom"""
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._validate_valign(valign)
self._field_names.append(fieldname)
self._align[fieldname] = align
self._valign[fieldname] = valign
for i in range(0, len(column)):
if len(self._rows) < i+1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self._rows)))
def clear_rows(self):
"""Delete all rows from the table but keep the current field names"""
self._rows = []
def clear(self):
"""Delete all rows and field names from the table, maintaining nothing but styling options"""
self._rows = []
self._field_names = []
self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
def copy(self):
return copy.deepcopy(self)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field, value):
if isinstance(value, int) and field in self._int_format:
value = self._unicode(("%%%sd" % self._int_format[field]) % value)
elif isinstance(value, float) and field in self._float_format:
value = self._unicode(("%%%sf" % self._float_format[field]) % value)
return self._unicode(value)
def _compute_widths(self, rows, options):
if options["header"]:
widths = [_get_size(field)[0] for field in self._field_names]
else:
widths = len(self.field_names) * [0]
for row in rows:
for index, value in enumerate(row):
fieldname = self.field_names[index]
if fieldname in self.max_width:
widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname]))
else:
widths[index] = max(widths[index], _get_size(value)[0])
self._widths = widths
def _get_padding_widths(self, options):
if options["left_padding_width"] is not None:
lpad = options["left_padding_width"]
else:
lpad = options["padding_width"]
if options["right_padding_width"] is not None:
rpad = options["right_padding_width"]
else:
rpad = options["padding_width"]
return lpad, rpad
def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
# Make a copy of only those rows in the slice range
rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
# Sort if necessary
if options["sortby"]:
sortindex = self._field_names.index(options["sortby"])
# Decorate
rows = [[row[sortindex]]+row for row in rows]
# Sort
rows.sort(reverse=options["reversesort"], key=options["sort_key"])
# Undecorate
rows = [row[1:] for row in rows]
return rows
def _format_row(self, row, options):
return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)]
def _format_rows(self, rows, options):
return [self._format_row(row, options) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
print_empty - if True, stringify just the header for an empty table, if False return an empty string """
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the header?
if self.rowcount == 0 and (not options["print_empty"] or not options["border"]):
return ""
# Get the rows we need to print, taking into account slicing, sorting, etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
# Add header or top of border
self._hrule = self._stringify_hrule(options)
if options["header"]:
lines.append(self._stringify_header(options))
elif options["border"] and options["hrules"] in (ALL, FRAME):
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options["border"] and options["hrules"] == FRAME:
lines.append(self._hrule)
return self._unicode("\n").join(lines)
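# Usage sketch (illustrative): the same keyword options accepted by the
# constructor can be passed per call, e.g. to print a sorted slice of rows
# without changing the table's persistent settings.
#
#     print(t.get_string(sortby="Score", reversesort=True, start=0, end=10,
#                        fields=["Name", "Score"]))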
def _stringify_hrule(self, options):
if not options["border"]:
return ""
lpad, rpad = self._get_padding_widths(options)
if options['vrules'] in (ALL, FRAME):
bits = [options["junction_char"]]
else:
bits = [options["horizontal_char"]]
# For tables with no data or fieldnames
if not self._field_names:
bits.append(options["junction_char"])
return "".join(bits)
for field, width in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
bits.append((width+lpad+rpad)*options["horizontal_char"])
if options['vrules'] == ALL:
bits.append(options["junction_char"])
else:
bits.append(options["horizontal_char"])
if options["vrules"] == FRAME:
bits.pop()
bits.append(options["junction_char"])
return "".join(bits)
def _stringify_header(self, options):
bits = []
lpad, rpad = self._get_padding_widths(options)
if options["border"]:
if options["hrules"] in (ALL, FRAME):
bits.append(self._hrule)
bits.append("\n")
if options["vrules"] in (ALL, FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
# For tables with no data or field names
if not self._field_names:
if options["vrules"] in (ALL, FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
for field, width, in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
if self._header_style == "cap":
fieldname = field.capitalize()
elif self._header_style == "title":
fieldname = field.title()
elif self._header_style == "upper":
fieldname = field.upper()
elif self._header_style == "lower":
fieldname = field.lower()
else:
fieldname = field
bits.append(" " * lpad + self._justify(fieldname, width, self._align[field]) + " " * rpad)
if options["border"]:
if options["vrules"] == ALL:
bits.append(options["vertical_char"])
else:
bits.append(" ")
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
if options["border"] and options["vrules"] == FRAME:
bits.pop()
bits.append(options["vertical_char"])
if options["border"] and options["hrules"] != NONE:
bits.append("\n")
bits.append(self._hrule)
return "".join(bits)
def _stringify_row(self, row, options):
for index, field, value, width, in zip(range(0,len(row)), self._field_names, row, self._widths):
# Enforce max widths
lines = value.split("\n")
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = "\n".join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
lpad, rpad = self._get_padding_widths(options)
for y in range(0, row_height):
bits.append([])
if options["border"]:
if options["vrules"] in (ALL, FRAME):
bits[y].append(self.vertical_char)
else:
bits[y].append(" ")
for field, value, width, in zip(self._field_names, row, self._widths):
valign = self._valign[field]
lines = value.split("\n")
dHeight = row_height - len(lines)
if dHeight:
if valign == "m":
lines = [""] * int(dHeight / 2) + lines + [""] * (dHeight - int(dHeight / 2))
elif valign == "b":
lines = [""] * dHeight + lines
else:
lines = lines + [""] * dHeight
y = 0
for l in lines:
if options["fields"] and field not in options["fields"]:
continue
bits[y].append(" " * lpad + self._justify(l, width, self._align[field]) + " " * rpad)
if options["border"]:
if options["vrules"] == ALL:
bits[y].append(self.vertical_char)
else:
bits[y].append(" ")
y += 1
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
for y in range(0, row_height):
if options["border"] and options["vrules"] == FRAME:
bits[y].pop()
bits[y].append(options["vertical_char"])
if options["border"] and options["hrules"]== ALL:
bits[row_height-1].append("\n")
bits[row_height-1].append(self._hrule)
for y in range(0, row_height):
bits[y] = "".join(bits[y])
return "\n".join(bits)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs):
"""Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
xhtml - print <br/> tags if True, <br> tags if false"""
options = self._get_options(kwargs)
if options["format"]:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string
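# Usage sketch (illustrative): plain and attribute-decorated HTML output.
# The attribute values are made up.
#
#     html = t.get_html_string(attributes={"class": "scores", "id": "t1"})
#     formatted = t.get_html_string(format=True, hrules=ALL)  # styled <table>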
def _get_simple_html_string(self, options):
lines = []
if options["xhtml"]:
linebreak = "<br/>"
else:
linebreak = "<br>"
open_tag = []
open_tag.append("<table")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th>%s</th>" % escape(field).replace("\n", linebreak))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
for row in formatted_rows:
lines.append(" <tr>")
for field, datum in zip(self._field_names, row):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td>%s</td>" % escape(datum).replace("\n", linebreak))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
def _get_formatted_html_string(self, options):
lines = []
lpad, rpad = self._get_padding_widths(options)
if options["xhtml"]:
linebreak = "<br/>"
else:
linebreak = "<br>"
open_tag = []
open_tag.append("<table")
if options["border"]:
if options["hrules"] == ALL and options["vrules"] == ALL:
open_tag.append(" frame=\"box\" rules=\"all\"")
elif options["hrules"] == FRAME and options["vrules"] == FRAME:
open_tag.append(" frame=\"box\"")
elif options["hrules"] == FRAME and options["vrules"] == ALL:
open_tag.append(" frame=\"box\" rules=\"cols\"")
elif options["hrules"] == FRAME:
open_tag.append(" frame=\"hsides\"")
elif options["hrules"] == ALL:
open_tag.append(" frame=\"hsides\" rules=\"rows\"")
elif options["vrules"] == FRAME:
open_tag.append(" frame=\"vsides\"")
elif options["vrules"] == ALL:
open_tag.append(" frame=\"vsides\" rules=\"cols\"")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (lpad, rpad, escape(field).replace("\n", linebreak)))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
aligns = []
valigns = []
for field in self._field_names:
aligns.append({ "l" : "left", "r" : "right", "c" : "center" }[self._align[field]])
valigns.append({"t" : "top", "m" : "middle", "b" : "bottom"}[self._valign[field]])
for row in formatted_rows:
lines.append(" <tr>")
for field, datum, align, valign in zip(self._field_names, row, aligns, valigns):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td style=\"padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s\">%s</td>" % (lpad, rpad, align, valign, escape(datum).replace("\n", linebreak)))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
##############################
# UNICODE WIDTH FUNCTIONS #
##############################
def _char_block_width(char):
# Basic Latin, which is probably the most common case
#if char in xrange(0x0021, 0x007e):
#if char >= 0x0021 and char <= 0x007e:
if 0x0021 <= char <= 0x007e:
return 1
# Chinese, Japanese, Korean (common)
if 0x4e00 <= char <= 0x9fff:
return 2
# Hangul
if 0xac00 <= char <= 0xd7af:
return 2
# Combining?
if unicodedata.combining(uni_chr(char)):
return 0
# Hiragana and Katakana
if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff:
return 2
# Full-width Latin characters
if 0xff01 <= char <= 0xff60:
return 2
# CJK punctuation
if 0x3000 <= char <= 0x303e:
return 2
# Backspace and delete
if char in (0x0008, 0x007f):
return -1
# Other control characters
elif 0x0000 <= char <= 0x001f:
return 0
# Take a guess
return 1
def _str_block_width(val):
return sum(itermap(_char_block_width, itermap(ord, _re.sub("", val))))
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names = None, **kwargs):
dialect = csv.Sniffer().sniff(fp.read(1024))
fp.seek(0)
reader = csv.reader(fp, dialect)
table = PrettyTable(**kwargs)
if field_names:
table.field_names = field_names
else:
if py3k:
table.field_names = [x.strip() for x in next(reader)]
else:
table.field_names = [x.strip() for x in reader.next()]
for row in reader:
table.add_row([x.strip() for x in row])
return table
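# Usage sketch (illustrative): building a table from a CSV file. The file name
# and its contents are assumptions for the example; the first CSV line is used
# as the field names unless field_names is given.
#
#     fp = open("scores.csv", "r")
#     t = from_csv(fp)
#     fp.close()
#     print(t.get_string(sortby="Score"))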
def from_db_cursor(cursor, **kwargs):
if cursor.description:
table = PrettyTable(**kwargs)
table.field_names = [col[0] for col in cursor.description]
for row in cursor.fetchall():
table.add_row(row)
return table
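# Usage sketch (illustrative): building a table from a DB-API cursor, here
# assuming an sqlite3 database with a "scores" table.
#
#     import sqlite3
#     conn = sqlite3.connect("scores.db")
#     cur = conn.cursor()
#     cur.execute("SELECT name, score FROM scores")
#     print(from_db_cursor(cur))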
class TableHandler(HTMLParser):
def __init__(self, **kwargs):
HTMLParser.__init__(self)
self.kwargs = kwargs
self.tables = []
self.last_row = []
self.rows = []
self.max_row_width = 0
self.active = None
self.last_content = ""
self.is_last_row_header = False
def handle_starttag(self,tag, attrs):
self.active = tag
if tag == "th":
self.is_last_row_header = True
def handle_endtag(self,tag):
if tag in ["th", "td"]:
stripped_content = self.last_content.strip()
self.last_row.append(stripped_content)
if tag == "tr":
self.rows.append(
(self.last_row, self.is_last_row_header))
self.max_row_width = max(self.max_row_width, len(self.last_row))
self.last_row = []
self.is_last_row_header = False
if tag == "table":
table = self.generate_table(self.rows)
self.tables.append(table)
self.rows = []
self.last_content = " "
self.active = None
def handle_data(self, data):
self.last_content += data
def generate_table(self, rows):
"""
Generates a PrettyTable object from a list of (row, is_header) tuples.
"""
table = PrettyTable(**self.kwargs)
for row in rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
for i in range(appends):
row[0].append("-")
if row[1]:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table
def make_fields_unique(self, fields):
"""
iterates over the fields and makes each one unique
"""
for i in range(0, len(fields)):
for j in range(i+1, len(fields)):
if fields[i] == fields[j]:
fields[j] += "'"
def from_html(html_code, **kwargs):
"""
Generates a list of PrettyTables from a string of HTML code. Each <table> in
the HTML becomes one PrettyTable object.
"""
parser = TableHandler(**kwargs)
parser.feed(html_code)
return parser.tables
def from_html_one(html_code, **kwargs):
"""
Generates a PrettyTable from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
return tables[0]
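# Usage sketch (illustrative): parsing HTML into PrettyTable objects. The
# markup below is invented for the example.
#
#     html = "<table><tr><th>Name</th></tr><tr><td>Alice</td></tr></table>"
#     tables = from_html(html)      # list with one table per <table> element
#     t = from_html_one(html)       # raises if the markup has more than one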
##############################
# MAIN (TEST FUNCTION) #
##############################
def main():
x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
x.sortby = "Population"
x.reversesort = True
x.int_format["Area"] = "04d"
x.float_format = "6.1f"
x.align["City name"] = "l" # Left align city names
x.add_row(["Adelaide", 1295, 1158259, 600.5])
x.add_row(["Brisbane", 5905, 1857594, 1146.4])
x.add_row(["Darwin", 112, 120900, 1714.7])
x.add_row(["Hobart", 1357, 205556, 619.5])
x.add_row(["Sydney", 2058, 4336374, 1214.8])
x.add_row(["Melbourne", 1566, 3806092, 646.9])
x.add_row(["Perth", 5386, 1554769, 869.4])
print(x)
if __name__ == "__main__":
main()
| mit | 8,584,577,250,778,788,000 | 35.755254 | 207 | 0.569927 | false |
croxis/SpaceDrive | spacedrive/renderpipeline/rpplugins/vxgi/voxelization_stage.py | 1 | 8394 | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from rpcore.globals import Globals
from rpcore.image import Image
from rpcore.render_stage import RenderStage
from panda3d.core import Camera, OrthographicLens, NodePath, CullFaceAttrib
from panda3d.core import DepthTestAttrib, Vec4, PTALVecBase3, Vec3, SamplerState
from panda3d.core import ColorWriteAttrib
class VoxelizationStage(RenderStage):
""" This stage voxelizes the whole scene """
required_inputs = ["DefaultEnvmap", "AllLightsData", "maxLightIndex"]
required_pipes = []
# The different states of voxelization
S_disabled = 0
S_voxelize_x = 1
S_voxelize_y = 2
S_voxelize_z = 3
S_gen_mipmaps = 4
def __init__(self, pipeline):
RenderStage.__init__(self, pipeline)
self.voxel_resolution = 256
self.voxel_world_size = -1
self.state = self.S_disabled
self.create_ptas()
def set_grid_position(self, pos):
self.pta_next_grid_pos[0] = pos
def create_ptas(self):
self.pta_next_grid_pos = PTALVecBase3.empty_array(1)
self.pta_grid_pos = PTALVecBase3.empty_array(1)
@property
def produced_inputs(self):
return {"voxelGridPosition": self.pta_grid_pos}
@property
def produced_pipes(self):
return {"SceneVoxels": self.voxel_grid}
def create(self):
# Create the voxel grid used to generate the voxels
self.voxel_temp_grid = Image.create_3d(
"VoxelsTemp", self.voxel_resolution, self.voxel_resolution,
self.voxel_resolution, "RGBA8")
self.voxel_temp_grid.set_clear_color(Vec4(0))
self.voxel_temp_nrm_grid = Image.create_3d(
"VoxelsTemp", self.voxel_resolution, self.voxel_resolution,
self.voxel_resolution, "R11G11B10")
self.voxel_temp_nrm_grid.set_clear_color(Vec4(0))
# Create the voxel grid which is a copy of the temporary grid, but stable
self.voxel_grid = Image.create_3d(
"Voxels", self.voxel_resolution, self.voxel_resolution, self.voxel_resolution, "RGBA8")
self.voxel_grid.set_clear_color(Vec4(0))
self.voxel_grid.set_minfilter(SamplerState.FT_linear_mipmap_linear)
# Create the camera for voxelization
self.voxel_cam = Camera("VoxelizeCam")
self.voxel_cam.set_camera_mask(self._pipeline.tag_mgr.get_voxelize_mask())
self.voxel_cam_lens = OrthographicLens()
self.voxel_cam_lens.set_film_size(
-2.0 * self.voxel_world_size, 2.0 * self.voxel_world_size)
self.voxel_cam_lens.set_near_far(0.0, 2.0 * self.voxel_world_size)
self.voxel_cam.set_lens(self.voxel_cam_lens)
self.voxel_cam_np = Globals.base.render.attach_new_node(self.voxel_cam)
self._pipeline.tag_mgr.register_camera("voxelize", self.voxel_cam)
# Create the voxelization target
self.voxel_target = self.create_target("VoxelizeScene")
self.voxel_target.size = self.voxel_resolution
self.voxel_target.prepare_render(self.voxel_cam_np)
# Create the target which copies the voxel grid
self.copy_target = self.create_target("CopyVoxels")
self.copy_target.size = self.voxel_resolution
self.copy_target.prepare_buffer()
# TODO! Does not work with the new render target yet - maybe add option
# to post process region for instances?
self.copy_target.instance_count = self.voxel_resolution
self.copy_target.set_shader_input("SourceTex", self.voxel_temp_grid)
self.copy_target.set_shader_input("DestTex", self.voxel_grid)
# Create the target which generates the mipmaps
self.mip_targets = []
mip_size, mip = self.voxel_resolution, 0
while mip_size > 1:
mip_size, mip = mip_size // 2, mip + 1
mip_target = self.create_target("GenMipmaps:" + str(mip))
mip_target.size = mip_size
mip_target.prepare_buffer()
mip_target.instance_count = mip_size
mip_target.set_shader_input("SourceTex", self.voxel_grid)
mip_target.set_shader_input("sourceMip", mip - 1)
mip_target.set_shader_input("DestTex", self.voxel_grid, False, True, -1, mip, 0)
self.mip_targets.append(mip_target)
# Create the initial state used for rendering voxels
initial_state = NodePath("VXGIInitialState")
initial_state.set_attrib(CullFaceAttrib.make(CullFaceAttrib.M_cull_none), 100000)
initial_state.set_attrib(DepthTestAttrib.make(DepthTestAttrib.M_none), 100000)
initial_state.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 100000)
self.voxel_cam.set_initial_state(initial_state.get_state())
Globals.base.render.set_shader_input("voxelGridPosition", self.pta_next_grid_pos)
Globals.base.render.set_shader_input("VoxelGridDest", self.voxel_temp_grid)
def update(self):
self.voxel_cam_np.show()
self.voxel_target.active = True
self.copy_target.active = False
for target in self.mip_targets:
target.active = False
# Voxelization disable
if self.state == self.S_disabled:
self.voxel_cam_np.hide()
self.voxel_target.active = False
# Voxelization from X-Axis
elif self.state == self.S_voxelize_x:
# Clear voxel grid
self.voxel_temp_grid.clear_image()
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(self.voxel_world_size, 0, 0))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Voxelization from Y-Axis
elif self.state == self.S_voxelize_y:
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(0, self.voxel_world_size, 0))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Voxelization from Z-Axis
elif self.state == self.S_voxelize_z:
self.voxel_cam_np.set_pos(
self.pta_next_grid_pos[0] + Vec3(0, 0, self.voxel_world_size))
self.voxel_cam_np.look_at(self.pta_next_grid_pos[0])
# Generate mipmaps
elif self.state == self.S_gen_mipmaps:
self.voxel_target.active = False
self.copy_target.active = True
self.voxel_cam_np.hide()
for target in self.mip_targets:
target.active = True
# As soon as we generate the mipmaps, we need to update the grid position
# as well
self.pta_grid_pos[0] = self.pta_next_grid_pos[0]
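# Illustrative sketch (assumption, not part of the original stage): a driving
# plugin would typically advance the state once per rendered frame so the grid
# is re-voxelized along all three axes and then mip-mapped, e.g.:
#
#     stage.set_grid_position(camera_pos)
#     for next_state in (VoxelizationStage.S_voxelize_x,
#                        VoxelizationStage.S_voxelize_y,
#                        VoxelizationStage.S_voxelize_z,
#                        VoxelizationStage.S_gen_mipmaps):
#         stage.state = next_state  # one state per frame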
def reload_shaders(self):
self.copy_target.shader = self.load_plugin_shader(
"/$$rp/shader/default_post_process_instanced.vert.glsl", "copy_voxels.frag.glsl")
mip_shader = self.load_plugin_shader(
"/$$rp/shader/default_post_process_instanced.vert.glsl", "generate_mipmaps.frag.glsl")
for target in self.mip_targets:
target.shader = mip_shader
def set_shader_input(self, *args):
Globals.render.set_shader_input(*args)
| mit | -8,389,214,982,361,238,000 | 40.826531 | 99 | 0.644508 | false |
rameshg87/pyremotevbox | pyremotevbox/ZSI/twisted/WSsecurity.py | 1 | 13760 | ###########################################################################
# Joshua R. Boverhof, LBNL
# See Copyright for copyright notice!
# $Id: WSsecurity.py 1134 2006-02-24 00:23:06Z boverhof $
###########################################################################
import sys, time, warnings
import sha, base64
# twisted & related imports
from zope.interface import classProvides, implements, Interface
from twisted.python import log, failure
from twisted.web.error import NoResource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import reactor
import twisted.web.http
import twisted.web.resource
# ZSI imports
from pyremotevbox.ZSI import _get_element_nsuri_name, EvaluateException, ParseException
from pyremotevbox.ZSI.parse import ParsedSoap
from pyremotevbox.ZSI.writer import SoapWriter
from pyremotevbox.ZSI.TC import _get_global_element_declaration as GED
from pyremotevbox.ZSI import fault
from pyremotevbox.ZSI.wstools.Namespaces import OASIS, DSIG
from WSresource import DefaultHandlerChain, HandlerChainInterface,\
WSAddressCallbackHandler, DataHandler, WSAddressHandler
#
# Global Element Declarations
#
UsernameTokenDec = GED(OASIS.WSSE, "UsernameToken")
SecurityDec = GED(OASIS.WSSE, "Security")
SignatureDec = GED(DSIG.BASE, "Signature")
PasswordDec = GED(OASIS.WSSE, "Password")
NonceDec = GED(OASIS.WSSE, "Nonce")
CreatedDec = GED(OASIS.UTILITY, "Created")
if None in [UsernameTokenDec,SecurityDec,SignatureDec,PasswordDec,NonceDec,CreatedDec]:
raise ImportError, 'required global element(s) unavailable: %s ' %({
(OASIS.WSSE, "UsernameToken"):UsernameTokenDec,
(OASIS.WSSE, "Security"):SecurityDec,
(DSIG.BASE, "Signature"):SignatureDec,
(OASIS.WSSE, "Password"):PasswordDec,
(OASIS.WSSE, "Nonce"):NonceDec,
(OASIS.UTILITY, "Created"):CreatedDec,
})
#
# Stability: Unstable, Untested, Not Finished.
#
class WSSecurityHandler:
"""Web Services Security: SOAP Message Security 1.0
Class Variables:
debug -- If True provide more detailed SOAP:Fault information to clients.
"""
classProvides(HandlerChainInterface)
debug = True
@classmethod
def processRequest(cls, ps, **kw):
if type(ps) is not ParsedSoap:
raise TypeError,'Expecting ParsedSoap instance'
security = ps.ParseHeaderElements([SecurityDec])
# Assume all security headers are supposed to be processed here.
for pyobj in security or []:
for any in pyobj.Any or []:
if any.typecode is UsernameTokenDec:
try:
ps = cls.UsernameTokenProfileHandler.processRequest(ps, any)
except Exception, ex:
if cls.debug: raise
raise RuntimeError, 'Unauthorized Username/passphrase combination'
continue
if any.typecode is SignatureDec:
try:
ps = cls.SignatureHandler.processRequest(ps, any)
except Exception, ex:
if cls.debug: raise
raise RuntimeError, 'Invalid Security Header'
continue
raise RuntimeError, 'WS-Security, Unsupported token %s' %str(any)
return ps
@classmethod
def processResponse(cls, output, **kw):
return output
class UsernameTokenProfileHandler:
"""Web Services Security UsernameToken Profile 1.0
Class Variables:
targetNamespace --
"""
classProvides(HandlerChainInterface)
# Class Variables
targetNamespace = OASIS.WSSE
sweepInterval = 60*5
nonces = None
# Set to None to disable
PasswordText = targetNamespace + "#PasswordText"
PasswordDigest = targetNamespace + "#PasswordDigest"
# Override passwordCallback
passwordCallback = lambda cls,username: None
@classmethod
def sweep(cls, index):
"""remove nonces every sweepInterval.
Parameters:
index -- remove all nonces up to this index.
"""
if cls.nonces is None:
cls.nonces = []
seconds = cls.sweepInterval
cls.nonces = cls.nonces[index:]
reactor.callLater(seconds, cls.sweep, len(cls.nonces))
@classmethod
def processRequest(cls, ps, token, **kw):
"""
Parameters:
ps -- ParsedSoap instance
token -- UsernameToken pyclass instance
"""
if token.typecode is not UsernameTokenDec:
raise TypeError, 'expecting GED (%s,%s) representation.' %(
UsernameTokenDec.nspname, UsernameTokenDec.pname)
username = token.Username
# expecting only one password
# may have a nonce and a created
password = nonce = created = None
for any in token.Any or []:
if any.typecode is PasswordDec:
password = any
continue
if any.typecode is NonceDec:
nonce = any
continue
if any.typecode is CreatedDec:
created = any
continue
raise TypeError, 'UsernameTokenProfileHandler unexpected %s' %str(any)
if password is None:
raise RuntimeError, 'Unauthorized, no password'
# TODO: not yet supporting complexType simpleContent in pyclass_type
attrs = getattr(password, password.typecode.attrs_aname, {})
pwtype = attrs.get('Type', cls.PasswordText)
# Clear Text Passwords
if cls.PasswordText is not None and pwtype == cls.PasswordText:
if password == cls.passwordCallback(username):
return ps
raise RuntimeError, 'Unauthorized, clear text password failed'
if cls.nonces is None: cls.sweep(0)
if nonce is not None:
if nonce in cls.nonces:
raise RuntimeError, 'Invalid Nonce'
# created was 10 seconds ago or sooner
if created is not None and created < time.gmtime(time.time()-10):
raise RuntimeError, 'UsernameToken created is expired'
cls.nonces.append(nonce)
# PasswordDigest, recommended that implementations
# require a Nonce and Created
if cls.PasswordDigest is not None and pwtype == cls.PasswordDigest:
digest = sha.sha()
for i in (nonce, created, cls.passwordCallback(username)):
if i is None: continue
digest.update(i)
if password == base64.encodestring(digest.digest()).strip():
return ps
raise RuntimeError, 'Unauthorized, digest failed'
raise RuntimeError, 'Unauthorized, contents of UsernameToken unknown'
@classmethod
def processResponse(cls, output, **kw):
return output
@staticmethod
def hmac_sha1(xml):
return
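# Illustrative sketch (assumption): how a client would compute the digest that
# the handler above checks, i.e. Base64(SHA-1(nonce + created + password)).
# The nonce/created/password values are placeholders.
#
#     digest = sha.sha()
#     for part in (nonce, created, password):
#         digest.update(part)
#     password_digest = base64.encodestring(digest.digest()).strip()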
class SignatureHandler:
"""Web Services Security UsernameToken Profile 1.0
"""
digestMethods = {
DSIG.BASE+"#sha1":sha.sha,
}
signingMethods = {
DSIG.BASE+"#hmac-sha1":hmac_sha1,
}
canonicalizationMethods = {
DSIG.C14N_EXCL:lambda node: Canonicalize(node, unsuppressedPrefixes=[]),
DSIG.C14N:lambda node: Canonicalize(node),
}
@classmethod
def processRequest(cls, ps, signature, **kw):
"""
Parameters:
ps -- ParsedSoap instance
signature -- Signature pyclass instance
"""
if signature.typecode is not SignatureDec:
raise TypeError, 'expecting GED (%s,%s) representation.' %(
SignatureDec.nspname, SignatureDec.pname)
si = signature.SignedInfo
si.CanonicalizationMethod
calgo = si.CanonicalizationMethod.get_attribute_Algorithm()
for any in si.CanonicalizationMethod.Any:
pass
# Check Digest
si.Reference
context = XPath.Context.Context(ps.dom, processContents={'wsu':OASIS.UTILITY})
exp = XPath.Compile('//*[@wsu:Id="%s"]' %si.Reference.get_attribute_URI())
nodes = exp.evaluate(context)
if len(nodes) != 1:
raise RuntimeError, 'A SignedInfo Reference must refer to one node %s.' %(
si.Reference.get_attribute_URI())
try:
xml = cls.canonicalizationMethods[calgo](nodes[0])
except KeyError:
raise RuntimeError, 'Unsupported canonicalization algorithm'
# the digest algorithm comes from the Reference's DigestMethod
dalgo = si.Reference.DigestMethod.get_attribute_Algorithm()
try:
digest = cls.digestMethods[dalgo]
except KeyError:
raise RuntimeError, 'unknown DigestMethod Algorithm'
digestValue = base64.encodestring(digest(xml).digest()).strip()
if si.Reference.DigestValue != digestValue:
raise RuntimeError, 'digest does not match'
if si.Reference.Transforms:
pass
signature.KeyInfo
signature.KeyInfo.KeyName
signature.KeyInfo.KeyValue
signature.KeyInfo.RetrievalMethod
signature.KeyInfo.X509Data
signature.KeyInfo.PGPData
signature.KeyInfo.SPKIData
signature.KeyInfo.MgmtData
signature.KeyInfo.Any
signature.Object
# TODO: Check Signature
signature.SignatureValue
si.SignatureMethod
salgo = si.SignatureMethod.get_attribute_Algorithm()
if si.SignatureMethod.HMACOutputLength:
pass
for any in si.SignatureMethod.Any:
pass
# <SignedInfo><Reference URI="">
exp = XPath.Compile('//child::*[attribute::URI = "%s"]/..' %(
si.Reference.get_attribute_URI()))
nodes = exp.evaluate(context)
if len(nodes) != 1:
raise RuntimeError, 'A SignedInfo Reference must refer to one node %s.' %(
si.Reference.get_attribute_URI())
try:
xml = cls.canonicalizationMethods[calgo](nodes[0])
except KeyError:
raise RuntimeError, 'Unsupported canonicalization algorithm'
# TODO: Check SignatureValue
@classmethod
def processResponse(cls, output, **kw):
return output
class X509TokenProfileHandler:
"""Web Services Security UsernameToken Profile 1.0
"""
targetNamespace = DSIG.BASE
# Token Types
singleCertificate = targetNamespace + "#X509v3"
certificatePath = targetNamespace + "#X509PKIPathv1"
setCerticatesCRLs = targetNamespace + "#PKCS7"
@classmethod
def processRequest(cls, ps, signature, **kw):
return ps
"""
<element name="KeyInfo" type="ds:KeyInfoType"/>
<complexType name="KeyInfoType" mixed="true">
<choice maxOccurs="unbounded">
<element ref="ds:KeyName"/>
<element ref="ds:KeyValue"/>
<element ref="ds:RetrievalMethod"/>
<element ref="ds:X509Data"/>
<element ref="ds:PGPData"/>
<element ref="ds:SPKIData"/>
<element ref="ds:MgmtData"/>
<any processContents="lax" namespace="##other"/>
<!-- (1,1) elements from (0,unbounded) namespaces -->
</choice>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
<element name="Signature" type="ds:SignatureType"/>
<complexType name="SignatureType">
<sequence>
<element ref="ds:SignedInfo"/>
<element ref="ds:SignatureValue"/>
<element ref="ds:KeyInfo" minOccurs="0"/>
<element ref="ds:Object" minOccurs="0" maxOccurs="unbounded"/>
</sequence>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
<element name="SignatureValue" type="ds:SignatureValueType"/>
<complexType name="SignatureValueType">
<simpleContent>
<extension base="base64Binary">
<attribute name="Id" type="ID" use="optional"/>
</extension>
</simpleContent>
</complexType>
<!-- Start SignedInfo -->
<element name="SignedInfo" type="ds:SignedInfoType"/>
<complexType name="SignedInfoType">
<sequence>
<element ref="ds:CanonicalizationMethod"/>
<element ref="ds:SignatureMethod"/>
<element ref="ds:Reference" maxOccurs="unbounded"/>
</sequence>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
"""
class WSSecurityHandlerChainFactory:
protocol = DefaultHandlerChain
@classmethod
def newInstance(cls):
return cls.protocol(WSAddressCallbackHandler, DataHandler,
WSSecurityHandler, WSAddressHandler())
| apache-2.0 | 5,371,576,649,039,104,000 | 34.372751 | 90 | 0.566061 | false |
o5k/openerp-oemedical-v0.1 | openerp/addons/smsclient/smsclient.py | 1 | 17026 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011 SYLEAM (<http://syleam.fr/>)
# Copyright (C) 2013 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import urllib
from openerp.osv import fields, orm
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
try:
from SOAPpy import WSDL
except :
_logger.warning("ERROR IMPORTING SOAPpy, if not installed, please install it:"
" e.g.: apt-get install python-soappy")
class partner_sms_send(orm.Model):
_name = "partner.sms.send"
def _default_get_mobile(self, cr, uid, fields, context=None):
if context is None:
context = {}
partner_pool = self.pool.get('res.partner')
active_ids = fields.get('active_ids')
res = {}
i = 0
for partner in partner_pool.browse(cr, uid, active_ids, context=context):
i += 1
res = partner.mobile
if i > 1:
raise orm.except_orm(_('Error'), _('You can only select one partner'))
return res
def _default_get_gateway(self, cr, uid, fields, context=None):
if context is None:
context = {}
sms_obj = self.pool.get('sms.smsclient')
gateway_ids = sms_obj.search(cr, uid, [], limit=1, context=context)
return gateway_ids and gateway_ids[0] or False
def onchange_gateway(self, cr, uid, ids, gateway_id, context=None):
if context is None:
context = {}
sms_obj = self.pool.get('sms.smsclient')
if not gateway_id:
return {}
gateway = sms_obj.browse(cr, uid, gateway_id, context=context)
return {
'value': {
'validity': gateway.validity,
'classes': gateway.classes,
'deferred': gateway.deferred,
'priority': gateway.priority,
'coding': gateway.coding,
'tag': gateway.tag,
'nostop': gateway.nostop,
}
}
_columns = {
'mobile_to': fields.char('To', size=256, required=True),
'app_id': fields.char('API ID', size=256),
'user': fields.char('Login', size=256),
'password': fields.char('Password', size=256),
'text': fields.text('SMS Message', required=True),
'gateway': fields.many2one('sms.smsclient', 'SMS Gateway', required=True),
'validity': fields.integer('Validity',
help='the maximum time -in minute(s)- before the message is dropped'),
'classes': fields.selection([
('0', 'Flash'),
('1', 'Phone display'),
('2', 'SIM'),
('3', 'Toolkit')
], 'Class', help='the sms class: flash(0), phone display(1), SIM(2), toolkit(3)'),
'deferred': fields.integer('Deferred',
help='the time -in minute(s)- to wait before sending the message'),
'priority': fields.selection([
('0','0'),
('1','1'),
('2','2'),
('3','3')
], 'Priority', help='The priority of the message'),
'coding': fields.selection([
('1', '7 bit'),
('2', 'Unicode')
], 'Coding', help='The SMS coding: 1 for 7 bit or 2 for unicode'),
'tag': fields.char('Tag', size=256, help='an optional tag'),
'nostop': fields.boolean('NoStop', help='Do not display STOP clause in the message, this requires that this is not an advertising message'),
}
_defaults = {
'mobile_to': _default_get_mobile,
'gateway': _default_get_gateway,
}
def sms_send(self, cr, uid, ids, context=None):
if context is None:
context = {}
client_obj = self.pool.get('sms.smsclient')
for data in self.browse(cr, uid, ids, context=context):
if not data.gateway:
raise orm.except_orm(_('Error'), _('No Gateway Found'))
else:
client_obj._send_message(cr, uid, data, context=context)
return {}
class SMSClient(orm.Model):
_name = 'sms.smsclient'
_description = 'SMS Client'
_columns = {
'name': fields.char('Gateway Name', size=256, required=True),
'url': fields.char('Gateway URL', size=256,
required=True, help='Base url for message'),
'property_ids': fields.one2many('sms.smsclient.parms',
'gateway_id', 'Parameters'),
'history_line': fields.one2many('sms.smsclient.history',
'gateway_id', 'History'),
'method': fields.selection([
('http', 'HTTP Method'),
('smpp', 'SMPP Method')
], 'API Method', select=True),
'state': fields.selection([
('new', 'Not Verified'),
('waiting', 'Waiting for Verification'),
('confirm', 'Verified'),
], 'Gateway Status', select=True, readonly=True),
'users_id': fields.many2many('res.users',
'res_smsserver_group_rel', 'sid', 'uid', 'Users Allowed'),
'code': fields.char('Verification Code', size=256),
'body': fields.text('Message',
help="The message text that will be send along with the email which is send through this server"),
'validity': fields.integer('Validity',
help='The maximum time -in minute(s)- before the message is dropped'),
'classes': fields.selection([
('0', 'Flash'),
('1', 'Phone display'),
('2', 'SIM'),
('3', 'Toolkit')
], 'Class',
help='The SMS class: flash(0),phone display(1),SIM(2),toolkit(3)'),
'deferred': fields.integer('Deferred',
help='The time -in minute(s)- to wait before sending the message'),
'priority': fields.selection([
('0', '0'),
('1', '1'),
('2', '2'),
('3', '3')
], 'Priority', help='The priority of the message '),
'coding': fields.selection([
('1', '7 bit'),
('2', 'Unicode')
],'Coding', help='The SMS coding: 1 for 7 bit or 2 for unicode'),
'tag': fields.char('Tag', size=256, help='an optional tag'),
'nostop': fields.boolean('NoStop', help='Do not display STOP clause in the message, this requires that this is not an advertising message'),
'char_limit' : fields.boolean('Character Limit'),
}
_defaults = {
'state': 'new',
'method': 'http',
'validity': 10,
'classes': '1',
'deferred': 0,
'priority': '3',
'coding': '1',
'nostop': True,
'char_limit' : True,
}
def _check_permissions(self, cr, uid, id, context=None):
cr.execute('select * from res_smsserver_group_rel where sid=%s and uid=%s' % (id, uid))
data = cr.fetchall()
if len(data) <= 0:
return False
return True
def _prepare_smsclient_queue(self, cr, uid, data, name, context=None):
return {
'name': name,
'gateway_id': data.gateway.id,
'state': 'draft',
'mobile': data.mobile_to,
'msg': data.text,
'validity': data.validity,
'classes': data.classes,
            'deferred': data.deferred,
            'priority': data.priority,
'coding': data.coding,
'tag': data.tag,
'nostop': data.nostop,
}
def _send_message(self, cr, uid, data, context=None):
if context is None:
context = {}
gateway = data.gateway
if gateway:
if not self._check_permissions(cr, uid, gateway.id, context=context):
raise orm.except_orm(_('Permission Error!'), _('You have no permission to access %s ') % (gateway.name,))
url = gateway.url
name = url
if gateway.method == 'http':
prms = {}
for p in data.gateway.property_ids:
if p.type == 'user':
prms[p.name] = p.value
elif p.type == 'password':
prms[p.name] = p.value
elif p.type == 'to':
prms[p.name] = data.mobile_to
elif p.type == 'sms':
prms[p.name] = data.text
elif p.type == 'extra':
prms[p.name] = p.value
params = urllib.urlencode(prms)
name = url + "?" + params
queue_obj = self.pool.get('sms.smsclient.queue')
vals = self._prepare_smsclient_queue(cr, uid, data, name, context=context)
queue_obj.create(cr, uid, vals, context=context)
return True
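    # Illustrative note (added for exposition, not part of the original
    # module): with an HTTP gateway whose parameters are typed user/password/
    # to/sms, the queued request built above ends up looking roughly like
    #   http://gateway.example.com/send?user=demo&password=secret&to=%2B33612345678&sms=Hello
    # The host name and parameter names are assumptions for illustration only,
    # not values taken from any real gateway configuration.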
def _check_queue(self, cr, uid, context=None):
if context is None:
context = {}
queue_obj = self.pool.get('sms.smsclient.queue')
history_obj = self.pool.get('sms.smsclient.history')
sids = queue_obj.search(cr, uid, [
('state', '!=', 'send'),
('state', '!=', 'sending')
], limit=30, context=context)
queue_obj.write(cr, uid, sids, {'state': 'sending'}, context=context)
error_ids = []
sent_ids = []
for sms in queue_obj.browse(cr, uid, sids, context=context):
if sms.gateway_id.char_limit:
if len(sms.msg) > 160:
error_ids.append(sms.id)
continue
if sms.gateway_id.method == 'http':
try:
urllib.urlopen(sms.name)
except Exception as e:
raise orm.except_orm('Error', e)
### New Send Process OVH Dedicated ###
## Parameter Fetch ##
if sms.gateway_id.method == 'smpp':
for p in sms.gateway_id.property_ids:
if p.type == 'user':
login = p.value
elif p.type == 'password':
pwd = p.value
elif p.type == 'sender':
sender = p.value
elif p.type == 'sms':
account = p.value
try:
soap = WSDL.Proxy(sms.gateway_id.url)
message = ''
if sms.coding == '2':
message = str(sms.msg).decode('iso-8859-1').encode('utf8')
if sms.coding == '1':
message = str(sms.msg)
result = soap.telephonySmsUserSend(str(login), str(pwd),
str(account), str(sender), str(sms.mobile), message,
int(sms.validity), int(sms.classes), int(sms.deferred),
int(sms.priority), int(sms.coding),str(sms.gateway_id.tag), int(sms.gateway_id.nostop))
### End of the new process ###
except Exception as e:
raise orm.except_orm('Error', e)
history_obj.create(cr, uid, {
'name': _('SMS Sent'),
'gateway_id': sms.gateway_id.id,
'sms': sms.msg,
'to': sms.mobile,
}, context=context)
sent_ids.append(sms.id)
queue_obj.write(cr, uid, sent_ids, {'state': 'send'}, context=context)
queue_obj.write(cr, uid, error_ids, {
'state': 'error',
            'error': 'Size of SMS should not be more than 160 characters'
}, context=context)
return True
class SMSQueue(orm.Model):
_name = 'sms.smsclient.queue'
_description = 'SMS Queue'
_columns = {
'name': fields.text('SMS Request', size=256,
required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'msg': fields.text('SMS Text', size=256,
required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'mobile': fields.char('Mobile No', size=256,
required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'gateway_id': fields.many2one('sms.smsclient',
'SMS Gateway', readonly=True,
states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Queued'),
('sending', 'Waiting'),
('send', 'Sent'),
('error', 'Error'),
], 'Message Status', select=True, readonly=True),
'error': fields.text('Last Error', size=256,
readonly=True,
states={'draft': [('readonly', False)]}),
'date_create': fields.datetime('Date', readonly=True),
'validity': fields.integer('Validity',
help='The maximum time -in minute(s)- before the message is dropped'),
'classes': fields.selection([
('0', 'Flash'),
('1', 'Phone display'),
('2', 'SIM'),
('3', 'Toolkit')
], 'Class', help='The sms class: flash(0), phone display(1), SIM(2), toolkit(3)'),
'deferred': fields.integer('Deferred',
help='The time -in minute(s)- to wait before sending the message'),
'priority': fields.selection([
('0', '0'),
('1', '1'),
('2', '2'),
('3', '3')
], 'Priority', help='The priority of the message '),
'coding': fields.selection([
('1', '7 bit'),
('2', 'Unicode')
], 'Coding', help='The sms coding: 1 for 7 bit or 2 for unicode'),
'tag': fields.char('Tag', size=256,
help='An optional tag'),
'nostop': fields.boolean('NoStop', help='Do not display STOP clause in the message, this requires that this is not an advertising message'),
}
_defaults = {
'date_create': fields.datetime.now,
'state': 'draft',
}
class Properties(orm.Model):
_name = 'sms.smsclient.parms'
_description = 'SMS Client Properties'
_columns = {
'name': fields.char('Property name', size=256,
help='Name of the property whom appear on the URL'),
'value': fields.char('Property value', size=256,
help='Value associate on the property for the URL'),
'gateway_id': fields.many2one('sms.smsclient', 'SMS Gateway'),
'type': fields.selection([
('user', 'User'),
('password', 'Password'),
('sender', 'Sender Name'),
('to', 'Recipient No'),
('sms', 'SMS Message'),
('extra', 'Extra Info')
], 'API Method', select=True,
help='If parameter concern a value to substitute, indicate it'),
}
class HistoryLine(orm.Model):
_name = 'sms.smsclient.history'
_description = 'SMS Client History'
_columns = {
'name': fields.char('Description', size=160, required=True, readonly=True),
'date_create': fields.datetime('Date', readonly=True),
'user_id': fields.many2one('res.users', 'Username', readonly=True, select=True),
'gateway_id': fields.many2one('sms.smsclient', 'SMS Gateway', ondelete='set null', required=True),
'to': fields.char('Mobile No', size=15, readonly=True),
'sms': fields.text('SMS', size=160, readonly=True),
}
_defaults = {
'date_create': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
        res = super(HistoryLine, self).create(cr, uid, vals, context=context)
        cr.commit()
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,570,020,406,153,420,000 | 40.730392 | 148 | 0.508458 | false |
snickl/buildroot-iu | support/testing/infra/builder.py | 3 | 2028 | import os
import shutil
import subprocess
import infra
class Builder(object):
def __init__(self, config, builddir, logtofile):
self.config = '\n'.join([line.lstrip() for line in
config.splitlines()]) + '\n'
self.builddir = builddir
self.logfile = infra.open_log_file(builddir, "build", logtofile)
def configure(self):
if not os.path.isdir(self.builddir):
os.makedirs(self.builddir)
config_file = os.path.join(self.builddir, ".config")
with open(config_file, "w+") as cf:
cf.write(self.config)
# dump the defconfig to the logfile for easy debugging
self.logfile.write("> start defconfig\n" + self.config +
"> end defconfig\n")
self.logfile.flush()
env = {"PATH": os.environ["PATH"]}
cmd = ["make",
"O={}".format(self.builddir),
"olddefconfig"]
ret = subprocess.call(cmd, stdout=self.logfile, stderr=self.logfile,
env=env)
if ret != 0:
raise SystemError("Cannot olddefconfig")
def build(self):
env = {"PATH": os.environ["PATH"]}
if "http_proxy" in os.environ:
self.logfile.write("Using system proxy: " +
os.environ["http_proxy"] + "\n")
env['http_proxy'] = os.environ["http_proxy"]
env['https_proxy'] = os.environ["http_proxy"]
cmd = ["make", "-C", self.builddir]
ret = subprocess.call(cmd, stdout=self.logfile, stderr=self.logfile,
env=env)
if ret != 0:
raise SystemError("Build failed")
open(self.stamp_path(), 'a').close()
def stamp_path(self):
return os.path.join(self.builddir, "build-done")
def is_finished(self):
return os.path.exists(self.stamp_path())
def delete(self):
if os.path.exists(self.builddir):
shutil.rmtree(self.builddir)
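# Hedged usage sketch (added for exposition, not part of the original file):
# how a test might drive Builder end-to-end. The defconfig fragment and the
# build directory below are illustrative assumptions, not values from the
# Buildroot test suite.
#
#   builder = Builder(config="BR2_x86_64=y\n", builddir="/tmp/br-test",
#                     logtofile=True)
#   if not builder.is_finished():
#       builder.configure()   # writes .config, then runs "make olddefconfig"
#       builder.build()       # runs "make" and drops the build-done stamp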
| gpl-2.0 | -6,377,800,908,187,158,000 | 33.372881 | 76 | 0.542899 | false |
bderembl/mitgcm_configs | eddy_airsea/analysis/ode_wave.py | 1 | 1112 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
plt.ion()
f0 = 1e-4
u0 = 1.0
R0 = 40e3 # radius
vmax = -1.0 # m/s
def v1(rr):
v = -vmax*rr/R0*np.exp(-0.5*(rr/R0)**2)
# v = -vmax*np.tanh(rr/R0)/(np.cosh(rr/R0))**2/(np.tanh(1.0)/(np.cosh(1.0))**2)
return v
def dv1(rr):
v = -vmax/R0*np.exp(-0.5*(rr/R0)**2)*(1-(rr/R0)**2)
# v = -vmax*2/R0*np.tanh(rr/R0)/((np.cosh(rr/R0))**2)*(1/(np.cosh(rr/R0))**2 - (np.tanh(rr/R0))**2)/(np.tanh(1.0)/(np.cosh(1.0))**2)
return v
def f(r, t):
omega = np.sqrt((dv1(r)+v1(r)/r + f0)*(2*v1(r)/r + f0))
return u0*np.sin(omega*t)
si_r = 30
si_t = 30000
r0 = np.linspace(1,5*R0,si_r)
t = np.linspace(0, si_t/f0/1000, si_t)
ra = np.zeros((si_t,si_r))
for ni in range(0,si_r):
ra[:,ni] = integrate.odeint(f, r0[ni], t).squeeze()
plt.figure()
plt.plot(t*f0/(2*np.pi),ra/R0,'k',linewidth=1)
plt.xlabel(r'$tf/2\pi$')
plt.ylabel(r'$r_p/R_0$')
plt.xlim([np.min(t*f0/(2*np.pi)), np.max(t*f0/(2*np.pi))])
plt.ylim([np.min(ra/R0), 1.05*np.max(ra/R0)])
plt.savefig("ode_k0.pdf",bbox_inches='tight')
| mit | -6,112,656,027,416,300,000 | 23.173913 | 133 | 0.579137 | false |
cmc333333/regulations-parser | regparser/tree/paragraph.py | 1 | 6226 | import hashlib
import re
from regparser.tree import struct
from regparser.tree.depth import markers as mtypes
from regparser.search import segments
p_levels = [list(mtypes.lower), list(mtypes.ints), list(mtypes.roman),
list(mtypes.upper), list(mtypes.em_ints), list(mtypes.em_roman)]
def p_level_of(marker):
"""Given a marker(string), determine the possible paragraph levels it
could fall into. This is useful for determining the order of
paragraphs"""
potential_levels = []
for level, markers in enumerate(p_levels):
if marker in markers:
potential_levels.append(level)
return potential_levels
_NONWORDS = re.compile(r'\W+')
def hash_for_paragraph(text):
"""Hash a chunk of text and convert it into an integer for use with a
MARKERLESS paragraph identifier. We'll trim to just 8 hex characters for
legibility. We don't need to fear hash collisions as we'll have 16**8 ~ 4
billion possibilities. The birthday paradox tells us we'd only expect
collisions after ~ 60 thousand entries. We're expecting at most a few
hundred"""
phrase = _NONWORDS.sub('', text.lower())
hashed = hashlib.sha1(phrase).hexdigest()[:8]
return int(hashed, 16)
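# Illustrative check (added for exposition, not in the original source): the
# hash ignores case and punctuation, so two renderings of the same paragraph
# text collapse to a single MARKERLESS identifier, and the result always fits
# in 8 hex digits. The inputs are made-up examples, not regulation text.
#
#   >>> hash_for_paragraph("Definitions.") == hash_for_paragraph("definitions")
#   True
#   >>> 0 <= hash_for_paragraph("Scope and purpose.") < 16**8
#   True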
class ParagraphParser():
def __init__(self, p_regex, node_type):
"""p_regex is the regular expression used when searching through
paragraphs. It should contain a %s for the next paragraph 'part'
(e.g. 'a', 'A', '1', 'i', etc.) inner_label_fn is a function which
takes the current label, and the next paragraph 'part' and produces
a new label."""
self.p_regex = p_regex
self.node_type = node_type
def matching_subparagraph_ids(self, p_level, paragraph):
"""Return a list of matches if this paragraph id matches one of the
subparagraph ids (e.g. letter (i) and roman numeral (i)."""
matches = []
for depth in range(p_level+1, len(p_levels)):
for sub_id, sub in enumerate(p_levels[depth]):
if sub == p_levels[p_level][paragraph]:
matches.append((depth, sub_id))
return matches
def best_start(self, text, p_level, paragraph, starts, exclude=[]):
"""Given a list of potential paragraph starts, pick the best based
on knowledge of subparagraph structure. Do this by checking if the
id following the subparagraph (e.g. ii) is between the first match
and the second. If so, skip it, as that implies the first match was
a subparagraph."""
subparagraph_hazards = self.matching_subparagraph_ids(
p_level, paragraph)
starts = starts + [(len(text), len(text))]
for i in range(1, len(starts)):
_, prev_end = starts[i-1]
next_start, _ = starts[i]
s_text = text[prev_end:next_start]
s_exclude = [
(e_start + prev_end, e_end + prev_end)
for e_start, e_end in exclude]
is_subparagraph = False
for hazard_level, hazard_idx in subparagraph_hazards:
if self.find_paragraph_start_match(
s_text, hazard_level, hazard_idx + 1, s_exclude):
is_subparagraph = True
if not is_subparagraph:
return starts[i-1]
def find_paragraph_start_match(self, text, p_level, paragraph, exclude=[]):
"""Find the positions for the start and end of the requested label.
        p_level is one of 0,1,2,3; paragraph is the index within that level.
Return None if not present. Does not return results in the exclude
list (a list of start/stop indices). """
if len(p_levels) <= p_level or len(p_levels[p_level]) <= paragraph:
return None
match_starts = [(m.start(), m.end()) for m in re.finditer(
self.p_regex % p_levels[p_level][paragraph], text)]
match_starts = [
(start, end) for start, end in match_starts
if all([end < es or start > ee for es, ee in exclude])]
if len(match_starts) == 0:
return None
elif len(match_starts) == 1:
return match_starts[0]
else:
return self.best_start(
text, p_level, paragraph, match_starts, exclude)
def paragraph_offsets(self, text, p_level, paragraph, exclude=[]):
"""Find the start/end of the requested paragraph. Assumes the text
        does not jump up a p_level -- see build_tree below."""
start = self.find_paragraph_start_match(
text, p_level, paragraph, exclude)
if start is None:
return None
id_start, id_end = start
end = self.find_paragraph_start_match(
text[id_end:], p_level, paragraph + 1,
[(e_start - id_end, e_end - id_end)
for e_start, e_end in exclude])
if end is None:
end = len(text)
else:
end = end[0] + id_end
return (id_start, end)
def paragraphs(self, text, p_level, exclude=[]):
"""Return a list of paragraph offsets defined by the level param."""
def offsets_fn(remaining_text, p_idx, exclude):
return self.paragraph_offsets(
remaining_text, p_level, p_idx, exclude)
return segments(text, offsets_fn, exclude)
def build_tree(self, text, p_level=0, exclude=[], label=[],
title=''):
"""
Build a dict to represent the text hierarchy.
"""
subparagraphs = self.paragraphs(text, p_level, exclude)
if subparagraphs:
body_text = text[0:subparagraphs[0][0]]
else:
body_text = text
children = []
for paragraph, (start, end) in enumerate(subparagraphs):
new_text = text[start:end]
new_excludes = [(e[0] - start, e[1] - start) for e in exclude]
new_label = label + [p_levels[p_level][paragraph]]
children.append(
self.build_tree(
new_text, p_level + 1, new_excludes, new_label))
return struct.Node(body_text, children, label, title, self.node_type)
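# Hedged usage sketch (added for exposition, not part of the original module):
# building a tree for lower-case markers such as "(a)", "(b)". The regex and
# node type below are assumptions about the marker format; real callers supply
# their own pattern when instantiating ParagraphParser.
#
#   parser = ParagraphParser(r'\(%s\)', 'regtext')
#   tree = parser.build_tree("(a) First requirement. (b) Second requirement.",
#                            p_level=0, label=['1005', '2'])
#   # tree.children[0].label would then be ['1005', '2', 'a']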
| cc0-1.0 | 749,164,101,625,689,200 | 40.785235 | 79 | 0.596209 | false |
eamars/webserver | site-package/roster/sql.py | 1 | 4194 | import mysql.connector
SQL_CREATE_TABLE = \
"""
CREATE TABLE `{}` (
`date` date NOT NULL UNIQUE,
`chair` char(64) NOT NULL DEFAULT '',
`minute` char(64) NOT NULL DEFAULT '',
PRIMARY KEY (`date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8
"""
def create_database(cursor, database_name):
try:
cursor.execute("CREATE DATABASE `{}` DEFAULT CHARACTER SET 'utf8'".format(database_name))
except mysql.connector.Error as e:
print("Error [{}]: failed to create database [{}]".format(e, database_name))
raise Exception("MySQL")
def create_table(cursor, table_name):
try:
cursor.execute(SQL_CREATE_TABLE.format(table_name))
except mysql.connector.Error as e:
print("Error [{}]: failed to create table [{}]".format(e, table_name))
raise Exception("MySQL")
def establish_connection(config):
# Connection to server
connection = mysql.connector.connect(**config)
return connection
def close_connection(connection):
connection.close()
def connect_database(connection, database_name):
# Connect to database, or create a new one
try:
connection.database = database_name
except mysql.connector.Error as e:
if e.errno == 1049:
# Get cursor
cursor = connection.cursor()
print("Creating database [{}]".format(database_name))
create_database(cursor, database_name)
# Close cursor
cursor.close()
connection.database = database_name
else:
print("Error [{}]: connect database".format(e))
raise Exception("MySQL")
def entry_exists(connection, table_name, condition):
cursor = connection.cursor()
sql = "SELECT COUNT(*) FROM `{}` WHERE {}".format(table_name, condition)
# print(sql)
try:
cursor.execute(sql)
for result in cursor:
if result[0] == 0:
cursor.close()
return False
else:
cursor.close()
return True
except mysql.connector.Error as e:
if e.errno == 1146: # Table doesn't exist
print("Creating table [{}]".format(table_name))
create_table(cursor, table_name)
cursor.close()
return False
else:
print("Error [{}]: entry exists".format(e))
print(sql)
cursor.close()
raise Exception("MySQL")
def fetch_entry(connection, table_name, condition):
cursor = connection.cursor()
sql = "SELECT `chair`, `minute` from `{}` WHERE {}".format(table_name, condition)
try:
cursor.execute(sql)
for result in cursor:
return result[0], result[1]
except mysql.connector.Error as e:
if e.errno == 1146: # Table doesn't exist
print("Creating table [{}]".format(table_name))
create_table(cursor, table_name)
cursor.close()
return False
else:
print("Error [{}]: entry exists".format(e))
print(sql)
cursor.close()
raise Exception("MySQL")
def insert_entry(connection, table_name, value):
cursor = connection.cursor()
sql = "INSERT INTO `{}` {}".format(table_name, value)
# print(sql)
try:
cursor.execute(sql)
cursor.close()
except mysql.connector.Error as e:
if e.errno == 1146: # Table doesn't exist
print("Creating table [{}]".format(table_name))
create_table(cursor, table_name)
# Try to execute again
cursor.execute(sql)
cursor.close()
else:
print("Error [{}]: insert entry".format(e))
print(sql)
cursor.close()
raise Exception("MySQL")
def main():
SQL_CONFIG = {
"host": "192.168.2.5",
"user": "eamars",
"password": "931105",
"autocommit": True
}
connection = establish_connection(SQL_CONFIG)
connect_database(connection, "test")
print(entry_exists(connection, "roster", "chair=`Ran Bao`"))
close_connection(connection)
if __name__ == "__main__":
main()
| mit | -4,404,401,249,818,260,000 | 27.147651 | 97 | 0.572246 | false |
shear/rppy | rppy/fluid.py | 2 | 5952 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# rppy - a geophysical library for Python
# Copyright (c) 2014, Sean M. Contenti
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
def ciz_shapiro(K0, Kdry, Kf, u0, udry, uf, phi, Kphi=None, uphi=None):
"""
Generalized form of Gassmann's equation to perform fluid substitution to
allow for a solid (non-zero shear modulus) pore-filling material.
"""
    if Kphi is None:
        Kphi = K0
    if uphi is None:
        uphi = u0
    # Ciz & Shapiro give the saturated compliances; invert to return moduli.
    Ksat = 1/(1/Kdry - (1/Kdry - 1/K0)**2 /
              (phi*(1/Kf - 1/Kphi) + (1/Kdry - 1/K0)))
    usat = 1/(1/udry - (1/udry - 1/u0)**2 /
              (phi*(1/uf - 1/uphi) + (1/udry - 1/u0)))
return(Ksat, usat)
def gassmann(K0, Kin, Kfin, Kfout, phi):
"""
    Use Gassmann's equation to perform fluid substitution. Use the bulk modulus
    of a rock saturated with one fluid (or the dry frame, Kfin=0) to predict the
    bulk modulus of the same rock saturated with a second fluid.
:param K0: Frame mineral modulus (Gpa)
:param Kin: Input rock modulus (can be fluid saturated or dry)
    :param Kfin: Bulk modulus of the pore-filling fluid of the initial rock
(0 if input is the dry-rock modulus)
:param Kfout: Bulk modulus of the pore-filling fluid of the output
(0 if output is dry-rock modulus)
:param phi: Porosity of the rock
"""
A = Kfout / (phi*(K0 - Kfout))
B = Kin / (K0 - Kin)
C = Kfin / (phi*(K0 - Kfin))
D = A + B - C
Kout = K0*D / (1 + D)
return(Kout)
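# Hedged usage sketch (added for exposition, not part of the original module):
# a brine-to-gas substitution. The moduli, fluid properties and porosity below
# are illustrative assumptions, not laboratory values.
#
#   K0 = 37.0                 # quartz mineral modulus (GPa)
#   K_brine_sat = 17.6        # rock saturated with brine (GPa)
#   Kf_brine, Kf_gas = 2.7, 0.04
#   phi = 0.2
#   Kdry = gassmann(K0, K_brine_sat, Kf_brine, 0.0, phi)     # strip the brine
#   Kgas = gassmann(K0, K_brine_sat, Kf_brine, Kf_gas, phi)  # swap in gas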
def batzle_wang(P, T, fluid, S=None, G=None, api=None):
"""
Calculate the elastic properties of reservoir fluids using the
Batzle & Wang [1992] equations.
:param P: Pressure (MPa)
:param T: Temperature {deg C)
:param fluid: Fluid type to calculate: brine, gas, or oil
:param S: Salinity (brine only, in ppm)
:param G: Gas gravity (gas mode only, ratio of gas density to air density
at 15.6C and atmospheric pressure)
    :param api: American Petroleum Institute (API) oil gravity
"""
if fluid == 'brine':
S = S / (10**6) # ppm to fraction of one
w = np.array([
[1402.85, 1.524, 3.437e-3, -1.197e-5],
[4.871, -0.0111, 1.739e-4, -1.628e-6],
[-0.04783, 2.747e-4, -2.135e-6, 1.237e-8],
[1.487e-4, -6.503e-7, -1.455e-8, 1.327e-10],
[-2.197e-7, 7.987e-10, 5.230e-11, -4.614e-13],
])
rhow = (1 + (10**-6)*(-80*T - 3.3*(T**2) + 0.00175*(T**3) +
489*P - 2*T*P + 0.016*(T**2)*P - (1.3e-5)*(T**3)*P -
0.333*(P**2) - 0.002*T*(P**2)))
rhob = rhow + S*(0.668 + 0.44*S + (10**-6)*(300*P - 2400*P*S +
T*(80 + 3*T - 3300*S - 13*P + 47*P*S)))
Vw = 0
for i in range(4):
for j in range(3):
Vw = Vw + w[i][j]*T**i*P**j
Vb = (Vw + S*(1170 - 9.8*T + 0.055*T**2 - 8.5e-5*T**3 + 2.6*P -
0.0029*T*P - 0.0476*P**2) + S**(3/2)*(780 - 10*P + 0.16*P**2) -
1820*S**2)
out = {'rho': rhob, 'Vp': Vb}
elif fluid == 'oil':
Rg = 2.03*G*(P*np.exp(0.02878*api - 0.00377*T))**1.205
rho0 = 141.5 / (api + 131.5)
B0 = 0.972 + 0.00038*(2.4*Rg*(G/rho0)**0.5 + T + 17.8)**(1.175)
rho_r = (rho0/B0)*(1 + 0.001*Rg)**-1 # pseudo-density of oil
rhog = (rho0 + 0.0012*G*Rg)/B0 # density of oil with gas
rhop = (rhog + (0.00277*P - # correct for pressure
1.71e-7*P**3)*(rhog - 1.15)**2 + 3.49e-4*P)
rho = rhop / (0.972 + 3.81e-4*(T + 17.78)**1.175) # correct for temp
Vp = 2096*(rho_r / (2.6 - rho_r))**0.5 - 3.7*T + 4.64*P + 0.0115*(
4.12*(1.08/rho_r - 1)**0.5 -1)*T*P
out = {'rho': rho, 'Vp': Vp}
elif fluid == 'gas':
Ta = T + 273.15 # absolute temperature
Pr = P / (4.892 - 0.4048*G) # pseudo-pressure
Tr = Ta / (94.72 + 170.75*G) # pseudo-temperature
R = 8.31441
d = np.exp(-(0.45 + 8*(0.56 - 1/Tr)**2)*Pr**1.2/Tr)
c = 0.109*(3.85 - Tr)**2
b = 0.642*Tr - 0.007*Tr**4 - 0.52
a = 0.03 + 0.00527*(3.5 - Tr)**3
m = 1.2*(-(0.45 + 8*(0.56 - 1/Tr)**2)*Pr**0.2/Tr)
y = (0.85 + 5.6/(Pr + 2) + 27.1/(Pr + 3.5)**2 -
8.7*np.exp(-0.65*(Pr + 1)))
f = c*d*m + a
E = c*d
Z = a*Pr + b + E
rhog = (28.8*G*P) / (Z*R*Ta)
Kg = P*y / (1 - Pr*f/Z)
out = {'rho': rhog, 'K': Kg}
else:
out = None
return(out)
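# Hedged usage sketch (added for exposition, not part of the original module):
# querying fluid properties at reservoir-like conditions. The pressure,
# temperature, salinity, gas gravity and API values are illustrative
# assumptions only.
#
#   brine = batzle_wang(30.0, 80.0, 'brine', S=35000)    # {'rho': ..., 'Vp': ...}
#   gas = batzle_wang(30.0, 80.0, 'gas', G=0.6)          # {'rho': ..., 'K': ...}
#   oil = batzle_wang(30.0, 80.0, 'oil', G=0.6, api=32)  # {'rho': ..., 'Vp': ...}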
| bsd-2-clause | -4,058,591,948,400,436,700 | 36.670886 | 79 | 0.544859 | false |
cmacmackin/ford | ford/graphs.py | 1 | 48315 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# graphs.py
# This file is part of FORD.
#
# Copyright 2015 Christopher MacMackin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from __future__ import print_function
import os
import shutil
import re
import copy
import colorsys
from graphviz import Digraph
from ford.sourceform import FortranFunction, FortranSubroutine, FortranInterface, FortranProgram, FortranType, FortranModule, FortranSubmodule, FortranSubmoduleProcedure, FortranSourceFile, FortranBlockData
_coloured_edges = False
def set_coloured_edges(val):
'''
Public accessor to set whether to use coloured edges in graph or just
use black ones.
'''
global _coloured_edges
_coloured_edges = val
_parentdir = ''
def set_graphs_parentdir(val):
'''
Public accessor to set the parent directory of the graphs.
Needed for relative paths.
'''
global _parentdir
_parentdir = val
def rainbowcolour(depth, maxd):
if _coloured_edges:
(r, g, b) = colorsys.hsv_to_rgb(float(depth) / maxd, 1.0, 1.0)
R, G, B = int(255 * r), int(255 * g), int(255 * b)
return R, G, B
else:
return 0, 0, 0
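# Illustrative note (added for exposition): with coloured edges enabled, each
# of maxd edge groups gets an evenly spaced hue, e.g. rainbowcolour(0, 4)
# should give pure red (255, 0, 0) and rainbowcolour(2, 4) pure cyan
# (0, 255, 255); with colouring disabled both calls return black (0, 0, 0).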
HYPERLINK_RE = re.compile("^\s*<\s*a\s+.*href=(\"[^\"]+\"|'[^']+').*>(.*)</\s*a\s*>\s*$",re.IGNORECASE)
WIDTH_RE = re.compile('width="(.*?)pt"',re.IGNORECASE)
HEIGHT_RE = re.compile('height="(.*?)pt"',re.IGNORECASE)
EM_RE = re.compile('<em>(.*)</em>',re.IGNORECASE)
graphviz_installed = True
def newdict(old,key,val):
new = copy.copy(old)
new[key] = val
return new
def is_module(obj,cls):
return isinstance(obj,FortranModule) or issubclass(cls,FortranModule)
def is_submodule(obj,cls):
return isinstance(obj,FortranSubmodule) or issubclass(cls,FortranSubmodule)
def is_type(obj,cls):
return isinstance(obj,FortranType) or issubclass(cls,FortranType)
def is_proc(obj,cls):
return (isinstance(obj,(FortranFunction,FortranSubroutine,
FortranInterface,FortranSubmoduleProcedure))
or issubclass(cls,(FortranFunction,FortranSubroutine,
FortranInterface,FortranSubmoduleProcedure)))
def is_program(obj, cls):
return isinstance(obj,FortranProgram) or issubclass(cls,FortranProgram)
def is_sourcefile(obj, cls):
return isinstance(obj,FortranSourceFile) or issubclass(cls,FortranSourceFile)
def is_blockdata(obj, cls):
return isinstance(obj,FortranBlockData) or issubclass(cls,FortranBlockData)
class GraphData(object):
"""
Contains all of the nodes which may be displayed on a graph.
"""
def __init__(self):
self.submodules = {}
self.modules = {}
self.types = {}
self.procedures = {}
self.programs = {}
self.sourcefiles = {}
self.blockdata = {}
def register(self,obj,cls=type(None),hist={}):
"""
Takes a FortranObject and adds it to the appropriate list, if
not already present.
"""
#~ ident = getattr(obj,'ident',obj)
if is_submodule(obj,cls):
if obj not in self.submodules: self.submodules[obj] = SubmodNode(obj,self)
elif is_module(obj,cls):
if obj not in self.modules: self.modules[obj] = ModNode(obj,self)
elif is_type(obj,cls):
if obj not in self.types: self.types[obj] = TypeNode(obj,self,hist)
elif is_proc(obj,cls):
if obj not in self.procedures: self.procedures[obj] = ProcNode(obj,self,hist)
elif is_program(obj,cls):
if obj not in self.programs: self.programs[obj] = ProgNode(obj,self)
elif is_sourcefile(obj,cls):
if obj not in self.sourcefiles: self.sourcefiles[obj] = FileNode(obj,self)
elif is_blockdata(obj,cls):
if obj not in self.blockdata: self.blockdata[obj] = BlockNode(obj,self)
else:
raise BadType("Object type {} not recognized by GraphData".format(type(obj).__name__))
def get_node(self,obj,cls=type(None),hist={}):
"""
        Returns the node corresponding to obj. If it does not already exist
then it will create it.
"""
#~ ident = getattr(obj,'ident',obj)
if obj in self.modules and is_module(obj,cls):
return self.modules[obj]
elif obj in self.submodules and is_submodule(obj,cls):
return self.submodules[obj]
elif obj in self.types and is_type(obj,cls):
return self.types[obj]
elif obj in self.procedures and is_proc(obj,cls):
return self.procedures[obj]
elif obj in self.programs and is_program(obj,cls):
return self.programs[obj]
elif obj in self.sourcefiles and is_sourcefile(obj,cls):
return self.sourcefiles[obj]
elif obj in self.blockdata and is_blockdata(obj,cls):
return self.blockdata[obj]
else:
self.register(obj,cls,hist)
return self.get_node(obj,cls,hist)
class BaseNode(object):
colour = '#777777'
def __init__(self,obj):
self.attribs = {'color':self.colour,
'fontcolor':'white',
'style':'filled'}
self.fromstr = type(obj) is str
self.url = None
if self.fromstr:
m = HYPERLINK_RE.match(obj)
if m:
self.url = m.group(1)[1:-1]
self.name = m.group(2)
else:
self.name = obj
self.ident = self.name
else:
d = obj.get_dir()
if not d: d = 'none'
self.ident = d + '~' + obj.ident
self.name = obj.name
m = EM_RE.search(self.name)
if m: self.name = '<<i>'+m.group(1).strip()+'</i>>'
self.url = obj.get_url()
self.attribs['label'] = self.name
if self.url and getattr(obj,'visible',True):
if self.fromstr:
self.attribs['URL'] = self.url
else:
self.attribs['URL'] = _parentdir + self.url
self.afferent = 0
self.efferent = 0
def __eq__(self, other):
return self.ident == other.ident
def __hash__(self):
return hash(self.ident)
class ModNode(BaseNode):
colour = '#337AB7'
def __init__(self,obj,gd):
super(ModNode,self).__init__(obj)
self.uses = set()
self.used_by = set()
self.children = set()
if not self.fromstr:
for u in obj.uses:
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
n.afferent += 1
self.uses.add(n)
self.efferent += n.efferent
class SubmodNode(ModNode):
colour = '#5bc0de'
def __init__(self,obj,gd):
super(SubmodNode,self).__init__(obj,gd)
del self.used_by
if not self.fromstr:
if obj.ancestor:
self.ancestor = gd.get_node(obj.ancestor,FortranSubmodule)
else:
self.ancestor = gd.get_node(obj.ancestor_mod,FortranModule)
self.ancestor.children.add(self)
self.efferent += 1
self.ancestor.afferent += 1
class TypeNode(BaseNode):
colour = '#5cb85c'
def __init__(self,obj,gd,hist={}):
super(TypeNode,self).__init__(obj)
self.ancestor = None
self.children = set()
self.comp_types = dict()
self.comp_of = dict()
if not self.fromstr:
if obj.extends:
if obj.extends in hist:
self.ancestor = hist[obj.extends]
else:
self.ancestor = gd.get_node(obj.extends,FortranType,newdict(hist,obj,self))
self.ancestor.children.add(self)
self.ancestor.visible = getattr(obj.extends,'visible',True)
for var in obj.local_variables:
if (var.vartype == 'type' or var.vartype == 'class') and var.proto[0] != '*':
if var.proto[0] == obj:
n = self
elif var.proto[0] in hist:
n = hist[var.proto[0]]
else:
n = gd.get_node(var.proto[0],FortranType,newdict(hist,obj,self))
n.visible = getattr(var.proto[0],'visible',True)
if self in n.comp_of:
n.comp_of[self] += ', ' + var.name
else:
n.comp_of[self] = var.name
if n in self.comp_types:
self.comp_types[n] += ', ' + var.name
else:
self.comp_types[n] = var.name
class ProcNode(BaseNode):
@property
def colour(self):
if self.proctype.lower() == 'subroutine':
return '#d9534f'
elif self.proctype.lower() == 'function':
return '#d94e8f'
elif self.proctype.lower() == 'interface':
return '#A7506F'
#~ return '#c77c25'
else:
return super(ProcNode,self).colour
def __init__(self,obj,gd,hist={}):
#ToDo: Figure out appropriate way to handle interfaces to routines in submodules.
self.proctype = getattr(obj,'proctype','')
super(ProcNode,self).__init__(obj)
self.uses = set()
self.calls = set()
self.called_by = set()
self.interfaces = set()
self.interfaced_by = set()
if not self.fromstr:
for u in getattr(obj,'uses',[]):
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
self.uses.add(n)
for c in getattr(obj,'calls',[]):
if getattr(c,'visible',True):
if c == obj:
n = self
elif c in hist:
n = hist[c]
else:
n = gd.get_node(c,FortranSubroutine,newdict(hist,obj,self))
n.called_by.add(self)
self.calls.add(n)
if obj.proctype.lower() == 'interface':
for m in getattr(obj,'modprocs',[]):
if m.procedure and getattr(m.procedure,'visible',True):
if m.procedure in hist:
n = hist[m.procedure]
else:
n = gd.get_node(m.procedure,FortranSubroutine,newdict(hist,obj,self))
n.interfaced_by.add(self)
self.interfaces.add(n)
if hasattr(obj,'procedure') and obj.procedure.module and obj.procedure.module != True and getattr(obj.procedure.module,'visible',True):
if obj.procedure.module in hist:
n = hist[obj.procedure.module]
else:
n = gd.get_node(obj.procedure.module,FortranSubroutine,newdict(hist,obj,self))
n.interfaced_by.add(self)
self.interfaces.add(n)
class ProgNode(BaseNode):
colour = '#f0ad4e'
def __init__(self,obj,gd):
super(ProgNode,self).__init__(obj)
self.uses = set()
self.calls = set()
if not self.fromstr:
for u in obj.uses:
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
self.uses.add(n)
for c in obj.calls:
if getattr(c,'visible',True):
n = gd.get_node(c,FortranSubroutine)
n.called_by.add(self)
self.calls.add(n)
class BlockNode(BaseNode):
colour = '#5cb85c'
def __init__(self,obj,gd):
super(BlockNode,self).__init__(obj)
self.uses = set()
if not self.fromstr:
for u in obj.uses:
n = gd.get_node(u,FortranModule)
n.used_by.add(self)
self.uses.add(n)
class FileNode(BaseNode):
colour = '#f0ad4e'
def __init__(self,obj,gd,hist={}):
super(FileNode,self).__init__(obj)
self.afferent = set() # Things depending on this file
self.efferent = set() # Things this file depends on
if not self.fromstr:
for mod in obj.modules:
for dep in mod.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for mod in obj.submodules:
for dep in mod.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for proc in obj.functions + obj.subroutines:
for dep in proc.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for prog in obj.programs:
for dep in prog.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
for block in obj.blockdata:
for dep in block.deplist:
if dep.hierarchy[0] == obj:
continue
elif dep.hierarchy[0] in hist:
n = hist[dep.hierarchy[0]]
else:
n = gd.get_node(dep.hierarchy[0],FortranSourceFile,newdict(hist,obj,self))
n.afferent.add(self)
self.efferent.add(n)
class FortranGraph(object):
"""
Object used to construct the graph for some particular entity in the code.
"""
data = GraphData()
RANKDIR = 'RL'
def __init__(self,root,webdir='',ident=None):
"""
Initialize the graph, root is the object or list of objects,
for which the graph is to be constructed.
The webdir is the url where the graph should be stored, and
        ident can be provided to override the default identification
of the graph that will be used to construct the name of the
imagefile. It has to be provided if there are multiple root
nodes.
"""
self.root = [] # root nodes
self.hopNodes = [] # nodes of the hop which exceeded the maximum
self.hopEdges = [] # edges of the hop which exceeded the maximum
self.added = set() # nodes added to the graph
self.max_nesting = 0 # maximum numbers of hops allowed
self.max_nodes = 1 # maximum numbers of nodes allowed
self.warn = False # should warnings be written?
self.truncated = -1 # nesting where the graph was truncated
try:
for r in root:
self.root.append(self.data.get_node(r))
self.max_nesting = max(self.max_nesting,
int(r.meta['graph_maxdepth']))
self.max_nodes = max(self.max_nodes,
int(r.meta['graph_maxnodes']))
self.warn = self.warn or (r.settings['warn'].lower() == 'true')
except TypeError:
self.root.append(self.data.get_node(root))
self.max_nesting = int(root.meta['graph_maxdepth'])
self.max_nodes = max(self.max_nodes,
int(root.meta['graph_maxnodes']))
self.warn = root.settings['warn'].lower() == 'true'
self.webdir = webdir
if ident:
self.ident = ident + '~~' + self.__class__.__name__
else:
self.ident = root.get_dir() + '~~' + root.ident + '~~' + self.__class__.__name__
self.imgfile = self.ident
self.dot = Digraph(self.ident,
graph_attr={'size':'8.90625,1000.0',
'rankdir':self.RANKDIR,
'concentrate':'true',
'id':self.ident},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
# add root nodes to the graph
for n in self.root:
if len(self.root) == 1:
self.dot.node(n.ident, label=n.name)
else:
self.dot.node(n.ident, **n.attribs)
self.added.add(n)
# add nodes and edges depending on the root nodes to the graph
self.add_nodes(self.root)
#~ self.linkmap = self.dot.pipe('cmapx').decode('utf-8')
if graphviz_installed:
self.svg_src = self.dot.pipe().decode('utf-8')
self.svg_src = self.svg_src.replace('<svg ','<svg id="' + re.sub('[^\w]','',self.ident) + '" ')
w = int(WIDTH_RE.search(self.svg_src).group(1))
if isinstance(self,(ModuleGraph,CallGraph,TypeGraph)):
self.scaled = (w >= 855)
else:
self.scaled = (w >= 641)
else:
self.svg_src = ''
self.scaled = False
def add_to_graph(self, nodes, edges, nesting):
"""
Adds nodes and edges to the graph as long as the maximum number
of nodes is not exceeded.
All edges are expected to have a reference to an entry in nodes.
If the list of nodes is not added in the first hop due to graph
size limitations, they are stored in hopNodes.
If the graph was extended the function returns True, otherwise the
result will be False.
"""
if (len(nodes) + len(self.added)) > self.max_nodes:
if nesting < 2:
self.hopNodes = nodes
self.hopEdges = edges
self.truncated = nesting
return False
else:
for n in nodes:
self.dot.node(n.ident, **n.attribs)
for e in edges:
if len(e) == 5:
self.dot.edge(e[0].ident, e[1].ident, style=e[2],
color=e[3], label=e[4])
else:
self.dot.edge(e[0].ident, e[1].ident, style=e[2],
color=e[3])
self.added.update(nodes)
return True
def __str__(self):
"""
The string of the graph is its HTML representation.
It will only be created if it is not too large.
If the graph is overly large but can represented by a single node
with many dependencies it will be shown as a table instead to ease
the rendering in browsers.
"""
graph_as_table = len(self.hopNodes) > 0 and len(self.root) == 1
# Do not render empty graphs
if len(self.added) <= 1 and not graph_as_table:
return ''
# Do not render overly large graphs.
if len(self.added) > self.max_nodes:
if self.warn:
print('Warning: Not showing graph {0} as it would exceed the maximal number of {1} nodes.'
.format(self.ident,self.max_nodes))
# Only warn once about this
self.warn = False
return ''
# Do not render incomplete graphs.
if len(self.added) < len(self.root):
if self.warn:
print('Warning: Not showing graph {0} as it would be incomplete.'.format(self.ident))
# Only warn once about this
self.warn = False
return ''
if self.warn and self.truncated > 0:
print('Warning: Graph {0} is truncated after {1} hops.'.format(self.ident,self.truncated))
# Only warn once about this
self.warn = False
zoomName = ''
svgGraph = ''
rettext = ''
if graph_as_table:
# generate a table graph if maximum number of nodes gets exceeded in
# the first hop and there is only one root node.
root = '<td class="root" rowspan="{0}">{1}</td>'.format(
len(self.hopNodes) * 2 + 1, self.root[0].attribs['label'])
if self.hopEdges[0][0].ident == self.root[0].ident:
key = 1
root_on_left = (self.RANKDIR == 'LR')
if root_on_left:
arrowtemp = ('<td class="{0}{1}">{2}</td><td rowspan="2"'
+ 'class="triangle-right"></td>')
else:
arrowtemp = ('<td rowspan="2" class="triangle-left">'
+ '</td><td class="{0}{1}">{2}</td>')
else:
key = 0
root_on_left = (self.RANKDIR == 'RL')
if root_on_left:
arrowtemp = ('<td rowspan="2" class="triangle-left">'
+ '</td><td class="{0}{1}">{2}</td>')
else:
arrowtemp = ('<td class="{0}{1}">{2}</td><td rowspan="2"'
+ 'class="triangle-right"></td>')
# sort nodes in alphabetical order
self.hopEdges.sort(key=lambda x: x[key].attribs['label'].lower())
rows = ''
for i in range(len(self.hopEdges)):
e = self.hopEdges[i]
n = e[key]
if len(e) == 5:
arrow = arrowtemp.format(e[2], 'Text', e[4])
else:
arrow = arrowtemp.format(e[2], 'Bottom', 'w')
node = '<td rowspan="2" class="node" bgcolor="{0}">'.format(
n.attribs['color'])
try:
node += '<a href="{0}">{1}</a></td>'.format(
n.attribs['URL'], n.attribs['label'])
except:
node += n.attribs['label'] + '</td>'
if root_on_left:
rows += '<tr>' + root + arrow + node + '</tr>\n'
else:
rows += '<tr>' + node + arrow + root + '</tr>\n'
rows += '<tr><td class="{0}Top">w</td></tr>\n'.format(e[2])
root = ''
rettext += '<table class="graph">\n' + rows + '</table>\n'
# generate svg graph
else:
rettext += '<div class="depgraph">{0}</div>'
svgGraph = self.svg_src
# add zoom ability for big graphs
if self.scaled:
zoomName = re.sub('[^\w]', '', self.ident)
rettext += ('<script>var pan{1} = svgPanZoom(\'#{1}\', '
'{{zoomEnabled: true,controlIconsEnabled: true, '
'fit: true, center: true,}}); </script>')
rettext += ('<div><a type="button" class="graph-help" '
'data-toggle="modal" href="#graph-help-text">Help</a>'
'</div><div class="modal fade" id="graph-help-text" '
'tabindex="-1" role="dialog"><div class="modal-dialog '
'modal-lg" role="document"><div class="modal-content">'
'<div class="modal-header"><button type="button" '
'class="close" data-dismiss="modal" aria-label="Close">'
                        '<span aria-hidden="true">&times;</span></button><h4 class'
'="modal-title" id="-graph-help-label">Graph Key</h4>'
'</div><div class="modal-body">{2}</div></div></div>'
'</div>')
return rettext.format(svgGraph, zoomName, self.get_key())
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return(bool(self.__str__()))
@classmethod
def reset(cls):
cls.data = GraphData()
def create_svg(self, out_location):
if len(self.added) > len(self.root):
self._create_image_file(os.path.join(out_location, self.imgfile))
def _create_image_file(self,filename):
if graphviz_installed:
self.dot.render(filename,cleanup=False)
shutil.move(filename,os.path.join(os.path.dirname(filename),
os.path.basename(filename)+'.gv'))
class ModuleGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return MOD_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes and edges for generating the graph showing the relationship
between modules and submodules listed in nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for nu in n.uses:
if nu not in self.added:
hopNodes.add(nu)
hopEdges.append((n, nu, 'dashed', colour))
if hasattr(n, 'ancestor'):
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes, edges and attributes to the graph if maximum number of
# nodes is not exceeded
if self.add_to_graph(hopNodes, hopEdges, nesting):
self.dot.attr('graph', size='11.875,1000.0')
class UsesGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return MOD_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for the modules used by those listed in nodes. Adds
edges between them. Also does this for ancestor (sub)modules.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for nu in n.uses:
if nu not in self.added:
hopNodes.add(nu)
hopEdges.append((n, nu, 'dashed', colour))
if hasattr(n, 'ancestor'):
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class UsedByGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return MOD_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for modules using or descended from those listed in
nodes. Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for nu in getattr(n, 'used_by', []):
if nu not in self.added:
hopNodes.add(nu)
hopEdges.append((nu, n, 'dashed', colour))
for c in getattr(n, 'children', []):
if c not in self.added:
hopNodes.add(c)
hopEdges.append((c, n, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class FileGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return FILE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds edges showing dependencies between source files listed in
the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for ne in n.efferent:
if ne not in self.added:
hopNodes.add(ne)
hopEdges.append((ne, n, 'solid', colour))
# add nodes and edges to the graph if maximum number of nodes is not
# exceeded
self.add_to_graph(hopNodes, hopEdges, nesting)
class EfferentGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return FILE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for the files which this one depends on. Adds
edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for ne in n.efferent:
if ne not in self.added:
hopNodes.add(ne)
hopEdges.append((n, ne, 'dashed', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class AfferentGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return FILE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds nodes for files which depend upon this one. Adds appropriate
edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for na in n.afferent:
if na not in self.added:
hopNodes.add(na)
hopEdges.append((na, n, 'dashed', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class TypeGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return TYPE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds edges showing inheritance and composition relationships
between derived types listed in the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for keys in n.comp_types.keys():
if keys not in self.added:
hopNodes.add(keys)
for c in n.comp_types:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((n, c, 'dashed', colour, n.comp_types[c]))
if n.ancestor:
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes, edges and attributes to the graph if maximum number of
# nodes is not exceeded
if self.add_to_graph(hopNodes, hopEdges, nesting):
self.dot.attr('graph', size='11.875,1000.0')
class InheritsGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return TYPE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the types that this type extends or uses as components.
        Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for c in n.comp_types:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((n, c, 'dashed', colour, n.comp_types[c]))
if n.ancestor:
if n.ancestor not in self.added:
hopNodes.add(n.ancestor)
hopEdges.append((n, n.ancestor, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class InheritedByGraph(FortranGraph):
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return TYPE_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the types that extend this type or contain it as a
        component. Adds appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for c in n.comp_of:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((c, n, 'dashed', colour, n.comp_of[c]))
for c in n.children:
if c not in self.added:
hopNodes.add(c)
hopEdges.append((c, n, 'solid', colour))
# add nodes and edges for this hop to the graph if maximum number of
# nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class CallGraph(FortranGraph):
RANKDIR = 'LR'
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return CALL_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
Adds edges indicating the call-tree for the procedures listed in
the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for p in n.calls:
if p not in hopNodes:
hopNodes.add(p)
hopEdges.append((n, p, 'solid', colour))
for p in getattr(n, 'interfaces', []):
if p not in hopNodes:
hopNodes.add(p)
hopEdges.append((n, p, 'dashed', colour))
# add nodes, edges and attributes to the graph if maximum number of
# nodes is not exceeded
if self.add_to_graph(hopNodes, hopEdges, nesting):
self.dot.attr('graph', size='11.875,1000.0')
self.dot.attr('graph', concentrate='false')
class CallsGraph(FortranGraph):
RANKDIR = 'LR'
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return CALL_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the procedures called by those listed in nodes, as
        well as the procedures implementing any listed interfaces. Adds
        appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for p in n.calls:
if p not in self.added:
hopNodes.add(p)
hopEdges.append((n, p, 'solid', colour))
for p in getattr(n, 'interfaces', []):
if p not in self.added:
hopNodes.add(p)
hopEdges.append((n, p, 'dashed', colour))
        # add nodes, edges and attributes for this hop to the graph if
# maximum number of nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.dot.attr('graph', concentrate='false')
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
class CalledByGraph(FortranGraph):
RANKDIR = 'LR'
def get_key(self):
colour_notice = COLOURED_NOTICE if _coloured_edges else ''
return CALL_GRAPH_KEY.format(colour_notice)
def add_nodes(self, nodes, nesting=1):
"""
        Adds nodes for the procedures which call those listed in nodes, as
        well as any interfaces which the listed procedures implement. Adds
        appropriate edges between them.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
if isinstance(n, ProgNode):
continue
for p in n.called_by:
if p not in self.added:
hopNodes.add(p)
hopEdges.append((p, n, 'solid', colour))
for p in getattr(n, 'interfaced_by', []):
if p not in self.added:
hopNodes.add(p)
hopEdges.append((p, n, 'dashed', colour))
        # add nodes, edges and attributes for this hop to the graph if
# maximum number of nodes is not exceeded
if not self.add_to_graph(hopNodes, hopEdges, nesting):
return
elif len(hopNodes) > 0:
if nesting < self.max_nesting:
self.dot.attr('graph', concentrate='false')
self.add_nodes(hopNodes, nesting=nesting+1)
else:
self.truncated = nesting
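# Each *Graph subclass above follows the same pattern: collect one "hop" of
# neighbouring nodes and edges, hand them to add_to_graph(), and, while the
# node limit is not exceeded, recurse with nesting+1 up to max_nesting,
# recording the depth reached in self.truncated when the graph is cut short.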
class BadType(Exception):
"""
Raised when a type is passed to GraphData.register() which is not
accepted.
"""
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
# Generate graph keys
gd = GraphData()
class Proc(object):
def __init__(self,name,proctype):
self.name = name
self.proctype = proctype
self.ident = ''
def get_url(self):
return ''
def get_dir(self):
return ''
sub = Proc('Subroutine','Subroutine')
func = Proc('Function','Function')
intr = Proc('Interface','Interface')
gd.register('Module',FortranModule)
gd.register('Submodule',FortranSubmodule)
gd.register('Type',FortranType)
gd.register(sub,FortranSubroutine)
gd.register(func,FortranFunction)
gd.register(intr,FortranInterface)
gd.register('Unknown Procedure Type',FortranSubroutine)
gd.register('Program',FortranProgram)
gd.register('Source File',FortranSourceFile)
try:
# Generate key for module graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
for n in [('Module',FortranModule),('Submodule',FortranSubmodule),(sub,FortranSubroutine),(func,FortranFunction),('Program', FortranProgram)]:
dot.node(getattr(n[0],'name',n[0]),**gd.get_node(n[0],cls=n[1]).attribs)
dot.node('This Page\'s Entity')
mod_svg = dot.pipe().decode('utf-8')
# Generate key for type graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
dot.node('Type',**gd.get_node('Type',cls=FortranType).attribs)
dot.node('This Page\'s Entity')
type_svg = dot.pipe().decode('utf-8')
# Generate key for call graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
for n in [(sub,FortranSubroutine),(func,FortranFunction),(intr, FortranInterface),('Unknown Procedure Type',FortranFunction),('Program', FortranProgram)]:
dot.node(getattr(n[0],'name',n[0]),**gd.get_node(n[0],cls=n[1]).attribs)
dot.node('This Page\'s Entity')
call_svg = dot.pipe().decode('utf-8')
# Generate key for file graph
dot = Digraph('Graph Key',graph_attr={'size':'8.90625,1000.0',
'concentrate':'false'},
node_attr={'shape':'box',
'height':'0.0',
'margin':'0.08',
'fontname':'Helvetica',
'fontsize':'10.5'},
edge_attr={'fontname':'Helvetica',
'fontsize':'9.5'},
format='svg', engine='dot')
dot.node('Source File',**gd.get_node('Source File',cls=FortranSourceFile).attribs)
dot.node('This Page\'s Entity')
file_svg = dot.pipe().decode('utf-8')
except RuntimeError:
graphviz_installed = False
if graphviz_installed:
NODE_DIAGRAM = """
<p>Nodes of different colours represent the following: </p>
{}
"""
MOD_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a submodule to the (sub)module which it is
descended from. Dashed arrows point from a module or program unit to
modules which it uses.{{}}
</p>
""").format(mod_svg)
TYPE_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a derived type to the parent type which it
extends. Dashed arrows point from a derived type to the other
types it contains as a components, with a label listing the name(s) of
said component(s).{{}}
</p>
""").format(type_svg)
CALL_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a procedure to one which it calls. Dashed
arrows point from an interface to procedures which implement that interface.
This could include the module procedures in a generic interface or the
implementation in a submodule of an interface in a parent module.{{}}
</p>
""").format(call_svg)
FILE_GRAPH_KEY = (NODE_DIAGRAM + """
<p>Solid arrows point from a file to a file which it depends on. A file
is dependent upon another if the latter must be compiled before the former
can be.{{}}
</p>
""").format(file_svg)
COLOURED_NOTICE = " Where possible, edges connecting nodes are given " \
"different colours to make them easier to distinguish " \
"in large graphs."
del call_svg
del file_svg
del type_svg
del mod_svg
del dot
del sub
del func
del intr
| gpl-3.0 | 925,564,929,856,560,600 | 39.329716 | 206 | 0.522881 | false |
fedora-desktop-tests/evolution | features/steps/calendar_event_editor.py | 1 | 22024 | # -*- coding: UTF-8 -*-
from behave import step, then
from dogtail.predicate import GenericPredicate
from dogtail.tree import root
from dogtail.rawinput import keyCombo, typeText
from time import sleep
from behave_common_steps import wait_until
import datetime
import os
@step(u'Create new appointment')
def create_new_appointment(context):
context.app.instance.menu('File').click()
context.app.instance.menu('File').menu('New').point()
context.app.instance.menu('File').menu('New').menuItem('Appointment').click()
context.execute_steps(u"""
* Event editor with title "Appointment - No Summary" is displayed
""")
@step(u'Create new all day appointment')
def create_new_all_day_appointment(context):
context.app.instance.menu('File').click()
context.app.instance.menu('File').menu('New').point()
context.app.instance.menu('File').menu('New').menuItem('All Day Appointment').click()
context.execute_steps(u"""
* Event editor with title "Appointment - No Summary" is displayed
""")
@step(u'Create new meeting')
def create_new_meeting(context):
context.app.instance.menu('File').click()
context.app.instance.menu('File').menu('New').point()
context.app.instance.menu('File').menu('New').menuItem('Meeting').click()
context.execute_steps(u"""
* Event editor with title "Meeting - No Summary" is displayed
""")
@step(u'Event editor with title "{name}" is displayed')
def event_editor_with_name_displayed(context, name):
context.app.event_editor = context.app.instance.window(name)
@step(u'Save the meeting and choose not to send meeting invitations')
def save_meeting(context):
save_meeting_and_send_notifications(context, send=False)
@step(u'Save the meeting and send meeting invitations')
def save_meeting_and_send_notifications(context, send=True):
context.app.event_editor.button('Save and Close').click()
sleep(3)
if context.app.instance.findChildren(GenericPredicate(roleName='dialog', name='')):
dialog = context.app.instance.dialog(' ')
dialog.grabFocus()
if send:
dialog.button('Send').doActionNamed('click')
else:
dialog.button('Do not Send').doActionNamed('click')
assert wait_until(lambda x: x.dead, dialog),\
"Meeting invitations dialog was not closed"
assert wait_until(lambda x: x.dead, context.app.event_editor),\
"Meeting editor was not closed"
@step(u'Save the event and close the editor')
def save_event(context):
context.app.event_editor.button('Save and Close').click()
assert wait_until(lambda x: x.dead and not x.showing, context.app.event_editor),\
"Meeting editor is still visible"
@step(u'Set "{field}" field in event editor to "{value}"')
def set_field_in_event_editor(context, field, value):
if field == 'Calendar:':
# This cmb has no 'click' action, so use a custom set of actions
cmb = context.app.event_editor.childLabelled('Calendar:')
cmb.doActionNamed('press')
# Calendars have 4 spaces before the actual name
cmb.menuItem(' %s' % value).click()
text_fields = ['Summary:', 'Location:', 'Description:']
if field in text_fields:
context.app.event_editor.childLabelled(field).text = value
if field == 'Time:':
if ' ' in value:
(day, time) = value.split(' ')
context.app.event_editor.\
childLabelled('Time:').textentry('').text = time
else:
day = value
context.app.event_editor.child('Date').text = day
if field in ["For:", "Until:"]:
combo = context.app.event_editor.\
child(name='for', roleName='menu item').\
findAncestor(GenericPredicate(roleName='combo box'))
field_combovalue = field.lower()[:-1]
if combo.combovalue != field_combovalue:
combo.combovalue = field_combovalue
if field_combovalue == 'for':
(hours, minutes) = value.split(':')
spins = context.app.event_editor.findChildren(
GenericPredicate(roleName='spin button'))
spins[0].text = hours
spins[0].grab_focus()
keyCombo('<Enter>')
spins[1].text = minutes
spins[1].grab_focus()
keyCombo('<Enter>')
else:
filler = context.app.event_editor.child('until').parent.\
findChildren(GenericPredicate(roleName='filler'))[-2]
if ' ' in value:
(day, time) = value.split(' ')
filler.child(roleName='combo box').textentry('').text = time
else:
day = value
filler.child('Date').text = day
if field == 'Timezone:':
context.app.event_editor.button('Select Timezone').click()
dlg = context.app.instance.dialog('Select a Time Zone')
dlg.child('Timezone drop-down combination box').combovalue = value
dlg.button('OK').click()
assert wait_until(lambda x: x.dead, dlg),\
"'Select Time Zone' dialog was not closed"
if field == 'Categories:':
context.app.event_editor.button('Categories...').click()
context.app.categories = context.app.instance.dialog('Categories')
for category in value.split(','):
context.execute_steps(u'* Check "%s" category' % category.strip())
context.execute_steps(u'* Close categories dialog')
@step(u'Set the following fields in event editor')
def set_several_fields(context):
for row in context.table:
set_field_in_event_editor(context, row['Field'], row['Value'])
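# Illustrative Gherkin usage of the table-driven step above (the field names
# and values are examples only; any label handled by set_field_in_event_editor
# works):
#
#   * Set the following fields in event editor
#     | Field     | Value        |
#     | Summary:  | Team meeting |
#     | Location: | Room 101     |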
@step(u'"{field}" field is set to "{value}"')
def field_is_set_to(context, field, value):
value = value.strip()
text_fields = ['Summary:', 'Location:', 'Description:']
if field in text_fields:
actual = context.app.event_editor.childLabelled(field).text
context.assertion.assertEquals(actual, value)
if field == 'Time:':
day = context.app.event_editor.child('Date').text
if ' ' in value:
time = context.app.event_editor.\
childLabelled('Time:').textentry('').text
actual = '%s %s' % (day, time)
context.assertion.assertEquals(actual.lower(), value.lower())
else:
# All day event
context.assertion.assertEquals(day, value)
time_showing = context.app.event_editor.childLabelled('Time:').showing
context.assertion.assertFalse(
time_showing, "Time controls are displayed in all day event")
if field == 'For:':
# Ensure that correct value is set in combobox
combo = context.app.event_editor.child(name='for', roleName='combo box')
spins = context.app.event_editor.findChildren(GenericPredicate(roleName='spin button'))
if ' ' in value:
            actual = '%s:%s' % (spins[0].text, spins[1].text)
context.assertion.assertEquals(actual.lower(), value.lower())
else:
context.assertion.assertFalse(
spins[0].showing, "Time controls are displayed in all day event")
context.assertion.assertFalse(
spins[1].showing, "Time controls are displayed in all day event")
if field == 'Until:':
combo = context.app.event_editor.child(name='until', roleName='combo box')
filler = combo.parent.findChildren(GenericPredicate(roleName='filler'))[-2]
day = filler.child('Date').text
if ' ' in value:
time = filler.child(roleName='combo box').textentry('').text
actual = '%s %s' % (day, time)
context.assertion.assertEquals(actual.lower(), value.lower())
else:
# All day event
context.assertion.assertEquals(day, value)
time_showing = filler.child(roleName='combo box').textentry('').showing
context.assertion.assertFalse(
time_showing, "Time controls are displayed in all day event")
if field == 'Calendar:':
cmb = context.app.event_editor.childLabelled('Calendar:')
actual = cmb.combovalue.strip()
context.assertion.assertEquals(actual, value)
if field == 'Timezone:':
actual = context.app.event_editor.childLabelled('Time zone:').text
context.assertion.assertEquals(actual, value)
if field == 'Categories:':
actual = context.app.event_editor.textentry('Categories').text
context.assertion.assertEquals(actual, value)
@step(u'Event has the following details')
def event_has_fields_set(context):
for row in context.table:
context.execute_steps(u"""
* "%s" field is set to "%s"
""" % (row['Field'], row['Value']))
@step(u'Add "{name}" as attendee')
def add_user_as_attendee_with_role(context, name):
context.app.event_editor.button('Add').click()
# Input user name
typeText(name)
keyCombo('<Enter>')
# Evolution doesn't have a11y set for cell renderers, so role cannot be set
#table = context.app.event_editor.child(roleName='table')
# User will be added as a last row, so last cell is user role selector
#cell = table.findChildren(GenericPredicate(roleName='table cell'))[-1]
#cell.click()
@step(u'Remove "{name}" from attendee list')
def remove_user_from_attendee_list(context, name):
context.app.event_editor.child(name=name, roleName='table cell').click()
context.app.event_editor.button('Remove').click()
@step(u'Select first suggestion as attendee typing "{name}"')
def select_first_suggestion_as_attendee(context, name):
context.app.event_editor.button('Add').click()
typeText(name)
sleep(1)
    # Again, cell renderer is not available here
keyCombo("<Down>")
keyCombo("<Enter>")
sleep(0.5)
@then(u'"{user}" as "{role}" is present in attendees list')
def user_with_role_is_present_in_attendees_list(context, user, role):
table = context.app.event_editor.child(roleName='table')
cells = table.findChildren(GenericPredicate(roleName='table cell'))
found_indexes = [cells.index(c) for c in cells if c.text == user]
if found_indexes == []:
raise AssertionError("User '%s' was not found in attendees list" % user)
role_cell_index = found_indexes[0] + 1
if role_cell_index > len(cells):
raise AssertionError("Cannot find role cell for user '%s'" % user)
actual = cells[role_cell_index].text
context.assertion.assertEquals(actual, role)
@step(u'The following attendees are present in the list')
def verify_attendees_list_presence(context):
for row in context.table:
context.execute_steps(u"""
Then "%s" as "%s" is present in attendees list
""" % (row['Name'], row['Role']))
@step(u'Open attendees dialog')
def open_attendees_dialog(context):
context.app.event_editor.button('Attendees...').click()
context.app.attendees = context.app.instance.dialog('Attendees')
@step(u'Close attendees dialog')
def close_attendees_dialog(context):
context.app.attendees.button('Close').click()
assert wait_until(lambda x: not x.showing, context.app.attendees),\
"Attendees dialog was not closed"
@step(u'Change addressbook to "{name}" in attendees dialog')
def change_addressbook_in_attendees_dialog(context, name):
context.app.attendees.childLabelled('Address Book:').combovalue = ' %s' % name
@step(u'Add "{name}" contact as "{role}" in attendees dialog')
def add_contact_as_role_in_attendees_dialog(context, name, role):
contacts = context.app.attendees.childLabelled('Contacts').child(roleName='table')
contact = contacts.child(name)
contact.select()
btn = context.app.attendees.child('%ss' % role).parent.parent.parent.button('Add')
btn.click()
@step(u'Add "{user}" as "{role}" using Attendees dialog')
def add_contact_as_role_using_attendees_dialog(context, user, role):
context.execute_steps(u"""
* Open attendees dialog
* Add "%s" contact as "%s" in attendees dialog
* Close attendees dialog
""" % (user, role))
@step(u'Add "{user}" as "{role}" using Attendees dialog from "{addressbook}" addressbook')
def add_contact_from_addressbook_as_role_using_attendees_dialog(context, user, role, addressbook):
context.execute_steps(u"""
* Open attendees dialog
* Change addressbook to "%s" in attendees dialog
* Add "%s" contact as "%s" in attendees dialog
* Close attendees dialog
""" % (addressbook, user, role))
@step(u'Search for "{username}" in Attendees dialog in "{addressbook}" addressbook')
def search_for_user_in_attendees_dialog(context, username, addressbook):
context.execute_steps(u"""
* Open attendees dialog
* Change addressbook to "%s" in attendees dialog
""" % addressbook)
context.app.attendees.childLabelled('Search:').text = username
sleep(1)
@step(u'Show time zone in event editor')
def show_timezone(context):
if not context.app.event_editor.child('Time zone:').showing:
context.app.event_editor.menu('View').click()
context.app.event_editor.menu('View').menuItem('Time Zone').click()
@step(u'Show categories in event editor')
def show_categories(context):
if not context.app.event_editor.textentry('Categories').showing:
context.app.event_editor.menu('View').click()
context.app.event_editor.menu('View').menuItem('Categories').click()
@step(u'Set event start time in {num} minute')
@step(u'Set event start time in {num} minutes')
def set_event_start_time_in(context, num):
time = context.app.event_editor.childLabelled('Time:').textentry('').text
time_object = datetime.datetime.strptime(time.strip(), '%H:%M %p')
new_time_object = time_object + datetime.timedelta(minutes=int(num))
new_time = new_time_object.strftime('%H:%M %p')
context.app.event_editor.childLabelled('Time:').textentry('').text = new_time
context.app.event_editor.childLabelled('Time:').textentry('').keyCombo('<Enter>')
@step(u'Set event start date in {num} day')
@step(u'Set event start date in {num} days')
def set_event_start_date_in(context, num):
date = context.app.event_editor.child('Date').text
date_object = datetime.datetime.strptime(date, '%m/%d/%Y')
new_date_object = date_object + datetime.timedelta(days=int(num))
new_date = new_date_object.strftime('%m/%d/%Y')
context.app.event_editor.child('Date').text = ''
context.app.event_editor.child('Date').typeText(new_date)
context.app.event_editor.childLabelled('Time:').textentry('').click()
@step(u'Open reminders window')
def open_reminders_window(context):
context.app.event_editor.button('Reminders').click()
context.app.reminders = context.app.instance.dialog('Reminders')
@step(u'Select predefined reminder "{name}"')
def select_predefined_reminder(context, name):
context.app.reminders.child(roleName='combo box').combovalue = name
@step(u'Select custom reminder')
def select_custom_reminder(context):
context.app.reminders.child(roleName='combo box').combovalue = 'Customize'
@step(u'Add new reminder with "{action}" {num} {period} {before_after} "{start_end}"')
def add_new_custom_reminder(context, action, num, period, before_after, start_end):
context.app.reminders.button('Add').click()
dialog = context.app.instance.dialog('Add Reminder')
for value in [action, period, before_after, start_end]:
combo = dialog.child(value, roleName='menu item').parent.parent
if combo.combovalue != value:
combo.combovalue = value
spin_button = dialog.child(roleName='spin button')
spin_button.text = num
spin_button.grab_focus()
keyCombo('<Enter>')
dialog.button('OK').click()
assert wait_until(lambda x: x.dead, dialog), "Add Reminder dialog was not closed"
@step(u'Add new reminder with the following options')
def add_new_reminder_with_following_options(context):
context.app.reminders.button('Add').click()
dialog = context.app.instance.dialog('Add Reminder')
for row in context.table:
if row['Field'] in ['Action', 'Period', 'Before/After', 'Start/End']:
value = row['Value']
combo = dialog.child(value, roleName='menu item').parent.parent
if combo.combovalue != value:
combo.combovalue = value
elif row['Field'] == 'Num':
spin_button = dialog.child(roleName='spin button')
spin_button.text = row['Value']
spin_button.grab_focus()
keyCombo('<Enter>')
elif row['Field'] == 'Message':
dialog.child('Custom message').click()
# dialog.childLabelled('Message:').text = row['Value']
dialog.child(roleName='text').text = row['Value']
else:
dialog.childLabelled(row['Field']).text = row['Value']
dialog.button('OK').click()
assert wait_until(lambda x: x.dead, dialog), "Add Reminder dialog was not closed"
@step(u'Close reminders window')
def close_reminders_window(context):
context.app.reminders.button('Close').click()
assert wait_until(lambda x: not x.showing, context.app.reminders),\
"Reminders dialog was not closed"
@step(u'Appointment reminders window pops up in {num:d} minute')
@step(u'Appointment reminders window pops up in {num:d} minutes')
def appointment_reminders_window_pops_up(context, num):
alarm_notify = root.application('evolution-alarm-notify')
assert wait_until(
lambda x: x.findChildren(GenericPredicate(name='Appointments')) != [],
element=alarm_notify, timeout=60 * int(num)),\
"Appointments window didn't appear"
context.app.alarm_notify = alarm_notify.child(name='Appointments')
@step(u'Appointment reminders window contains reminder for "{name}" event')
def alarm_notify_contains_event(context, name):
reminders = context.app.alarm_notify.findChildren(
GenericPredicate(roleName='table cell'))
matching_reminders = [x for x in reminders if name in x.text]
assert matching_reminders != [], "Cannot find reminder '%s'" % name
@step(u'Application trigger warning pops up in {num} minutes')
def application_trigger_warning_pops_up(context, num):
alarm_notify = root.application('evolution-alarm-notify')
assert wait_until(
lambda x: x.findChildren(GenericPredicate(name='Warning', roleName='dialog')) != [],
element=alarm_notify, timeout=60 * int(num)),\
"Warning window didn't appear"
@step(u'{action} to run the specified program in application trigger warning window')
def action_to_run_specified_program(context, action):
alarm_notify = root.application('evolution-alarm-notify')
dialog = alarm_notify.dialog('Warning')
if action == 'Agree':
dialog.button('Yes').click()
else:
dialog.button('No').click()
@step(u'"{app}" is present in process list')
def app_is_present_in_process_list(context, app):
try:
assert root.application(app)
finally:
os.system("killall gnome-screenshot")
@step(u'"{app}" is not present in process list')
def app_is_not_present_in_process_list(context, app):
try:
app_names = map(lambda x: x.name, root.applications())
assert app not in app_names
finally:
os.system("killall %s" % app)
@step(u'Add "{filepath}" attachment in event editor')
def add_attachement_in_event_editor(context, filepath):
context.app.event_editor.button("Add Attachment...").click()
context.execute_steps(u"""
* file select dialog with name "Add Attachment" is displayed
* in file select dialog I select "%s"
""" % filepath)
@step(u'Save attachment "{name}" in event editor to "{file}"')
def save_attachment_to_file(context, name, file):
# Switch to List View
combo = context.app.event_editor.child(roleName='menu item', name='List View').parent.parent
if combo.name != 'List View':
combo.combovalue = 'List View'
# Right-click on the cell
cells = context.app.event_editor.findChildren(GenericPredicate(roleName='table cell'))
matching_cells = [x for x in cells if name in x.name]
if matching_cells == []:
raise RuntimeError("Cannot find attachment containing '%s'" % name)
cell = matching_cells[0]
cell.click(button=3)
# Get popup menu
popup_menu = context.app.instance.child(name='Add Attachment...', roleName='menu item').parent
popup_menu.child('Save As').click()
context.execute_steps(u"""
* Save attachment "%s" in mail viewer to "%s"
""" % (name, file))
@step(u'Display attendee {field}')
def show_attendee_field(context, field):
context.app.event_editor.menu('View').click()
menuItem = context.app.event_editor.menu('View').menuItem('%s Field' % field.capitalize())
if not menuItem.checked:
menuItem.click()
else:
keyCombo('<Esc>')
def get_contact_parameter_by_name(context, contact_name, column):
# Get attendees table
table = context.app.event_editor.child(roleName='table')
# Get header offset
headers = table.findChildren(GenericPredicate(roleName='table column header'))
header_names = [x.name for x in headers]
offset = header_names.index(column)
# Get table cells
cells = table.findChildren(GenericPredicate(roleName='table cell'))
found_indexes = [cells.index(c) for c in cells if c.text == str(contact_name)]
if found_indexes == []:
raise AssertionError("User '%s' was not found in attendees list" % contact_name)
cell_index = found_indexes[0] + offset
if cell_index > len(cells):
raise AssertionError("Cannot find '%s' cell for user '%s'" % (column, contact_name))
return cells[cell_index]
@step(u'Attendee "{name}" has "{status}" status')
def attendee_has_status(context, name, status):
actual = get_contact_parameter_by_name(context, name, 'Status').text
context.assertion.assertEquals(actual, status)
| gpl-2.0 | -8,544,367,351,086,763,000 | 38.188612 | 98 | 0.65583 | false |
elioth010/lugama | src/model/orm/Model.py | 1 | 1482 | '''
Created on Jan 8, 2016
@author: elioth010
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.sql.expression import text
from model.orm.DB import DB
class Model(DB):
'''
classdocs
'''
base = None
SessionFactory = None
session = None
def __init__(self):
'''
Constructor
'''
self.base = declarative_base()
self.SessionFactory = sessionmaker(bind=self.engine)
self.session = self.SessionFactory()
def save(self):
self.session = self.SessionFactory()
try:
self.session.add(self)
self.session.commit()
except:
self.session.rollback()
raise
def where(self, *args):
self.session = self.SessionFactory()
try:
            # filter() accepts positional criteria; filter_by() only takes
            # keyword arguments, so filter_by(args) would raise a TypeError
            return self.session.query(type(self)).filter(*args).all()
except:
self.session.rollback()
raise
def find(self, id_table):
self.session = self.SessionFactory()
try:
            return self.session.query(type(self)).filter(text('id='+id_table)).all()
except:
self.session.rollback()
raise
def delete(self):
self.session = self.SessionFactory()
try:
self.session.delete(self)
self.session.commit()
except:
self.session.rollback()
raise
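# Minimal usage sketch (illustrative only; ``Item`` is a hypothetical mapped
# subclass declared against the project's declarative base, and the engine
# comes from model.orm.DB):
#
#   class Item(Model):
#       __tablename__ = 'items'
#       # ... mapped columns ...
#
#   item = Item()
#   item.save()              # INSERT + commit
#   rows = item.find('42')   # SELECT ... WHERE id=42
#   item.delete()            # DELETE + commit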
| gpl-2.0 | -388,907,434,122,378,400 | 22.903226 | 78 | 0.557355 | false |
mushtaqak/edx-platform | lms/envs/devstack.py | 1 | 6327 | """
Specific overrides to the base prod settings to make development easier.
"""
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = True
SITE_NAME = 'localhost:8000'
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack')
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ LOGGERS ######################################
import logging
# Disable noisy loggers
for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']:
logging.getLogger(pkg_name).setLevel(logging.CRITICAL)
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
# Set this to the dashboard URL in order to display the link from the
# dashboard to the Analytics Dashboard.
ANALYTICS_DASHBOARD_URL = None
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += (
'django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
return True # We always want the toolbar on devstack regardless of IP, auth, etc.
########################### PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_PAYMENT_FAKE'] = True
CC_PROCESSOR_NAME = 'CyberSource2'
CC_PROCESSOR = {
'CyberSource2': {
"PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/',
"SECRET_KEY": 'abcd123',
"ACCESS_KEY": 'abcd123',
"PROFILE_ID": 'edx',
}
}
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2'
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
from django.utils.translation import ugettext as _
LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': _('Organization'),
},
'modes': {
'name': _('Course Type'),
'terms': {
'honor': _('Honor'),
'verified': _('Verified'),
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
########################## Shopping cart ##########################
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['STORE_BILLING_INFO'] = True
FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=wildcard-import
except ImportError:
pass
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| agpl-3.0 | 8,309,838,323,921,532,000 | 32.47619 | 126 | 0.595543 | false |
adbar/htmldate | htmldate/validators.py | 1 | 7114 | # pylint:disable-msg=E0611,I1101
"""
Filters for date parsing and date validators.
"""
## This file is available from https://github.com/adbar/htmldate
## under GNU GPL v3 license
# standard
import datetime
import logging
import time
from collections import Counter
from functools import lru_cache
from .settings import MIN_DATE, MIN_YEAR, LATEST_POSSIBLE, MAX_YEAR
LOGGER = logging.getLogger(__name__)
LOGGER.debug('date settings: %s %s %s', MIN_YEAR, LATEST_POSSIBLE, MAX_YEAR)
@lru_cache(maxsize=32)
def date_validator(date_input, outputformat, earliest=MIN_DATE, latest=LATEST_POSSIBLE):
"""Validate a string w.r.t. the chosen outputformat and basic heuristics"""
# try if date can be parsed using chosen outputformat
if not isinstance(date_input, datetime.date):
# speed-up
try:
if outputformat == '%Y-%m-%d':
dateobject = datetime.datetime(int(date_input[:4]),
int(date_input[5:7]),
int(date_input[8:10]))
# default
else:
dateobject = datetime.datetime.strptime(date_input, outputformat)
except ValueError:
return False
else:
dateobject = date_input
# basic year validation
year = int(datetime.date.strftime(dateobject, '%Y'))
if MIN_YEAR <= year <= MAX_YEAR:
# not newer than today or stored variable
try:
if earliest <= dateobject.date() <= latest:
return True
except AttributeError:
if earliest <= dateobject <= latest:
return True
LOGGER.debug('date not valid: %s', date_input)
return False
def output_format_validator(outputformat):
"""Validate the output format in the settings"""
# test in abstracto
    if not isinstance(outputformat, str) or '%' not in outputformat:
logging.error('malformed output format: %s', outputformat)
return False
# test with date object
dateobject = datetime.datetime(2017, 9, 1, 0, 0)
try:
dateobject.strftime(outputformat)
except (NameError, TypeError, ValueError) as err:
logging.error('wrong output format or format type: %s %s', outputformat, err)
return False
return True
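# For example, output_format_validator('%Y-%m-%d') returns True, while
# output_format_validator('yyyy-mm-dd') (no '%' directive) returns False.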
@lru_cache(maxsize=32)
def plausible_year_filter(htmlstring, pattern, yearpat, tocomplete=False):
"""Filter the date patterns to find plausible years only"""
# slow!
allmatches = pattern.findall(htmlstring)
occurrences = Counter(allmatches)
toremove = set()
# LOGGER.debug('occurrences: %s', occurrences)
for item in occurrences.keys():
# scrap implausible dates
try:
if tocomplete is False:
potential_year = int(yearpat.search(item).group(1))
else:
lastdigits = yearpat.search(item).group(1)
if lastdigits[0] == '9':
potential_year = int('19' + lastdigits)
else:
potential_year = int('20' + lastdigits)
except AttributeError:
LOGGER.debug('not a year pattern: %s', item)
toremove.add(item)
else:
if potential_year < MIN_YEAR or potential_year > MAX_YEAR:
LOGGER.debug('no potential year: %s', item)
toremove.add(item)
# occurrences.remove(item)
# continue
# preventing dictionary changed size during iteration error
for item in toremove:
del occurrences[item]
return occurrences
def compare_values(reference, attempt, outputformat, original_date):
"""Compare the date expression to a reference"""
timestamp = time.mktime(datetime.datetime.strptime(attempt, outputformat).timetuple())
if original_date is True:
if reference == 0 or timestamp < reference:
reference = timestamp
else:
if timestamp > reference:
reference = timestamp
return reference
@lru_cache(maxsize=32)
def filter_ymd_candidate(bestmatch, pattern, original_date, copyear, outputformat, min_date, max_date):
"""Filter free text candidates in the YMD format"""
if bestmatch is not None:
pagedate = '-'.join([bestmatch.group(1), bestmatch.group(2), bestmatch.group(3)])
if date_validator(pagedate, '%Y-%m-%d', earliest=min_date, latest=max_date) is True:
if copyear == 0 or int(bestmatch.group(1)) >= copyear:
LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
return convert_date(pagedate, '%Y-%m-%d', outputformat)
## TODO: test and improve
#if original_date is True:
# if copyear == 0 or int(bestmatch.group(1)) <= copyear:
# LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
# return convert_date(pagedate, '%Y-%m-%d', outputformat)
#else:
# if copyear == 0 or int(bestmatch.group(1)) >= copyear:
# LOGGER.debug('date found for pattern "%s": %s', pattern, pagedate)
# return convert_date(pagedate, '%Y-%m-%d', outputformat)
return None
def convert_date(datestring, inputformat, outputformat):
"""Parse date and return string in desired format"""
# speed-up (%Y-%m-%d)
if inputformat == outputformat:
return str(datestring)
# date object (speedup)
if isinstance(datestring, datetime.date):
return datestring.strftime(outputformat)
# normal
dateobject = datetime.datetime.strptime(datestring, inputformat)
return dateobject.strftime(outputformat)
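# For example, convert_date('2017-09-01', '%Y-%m-%d', '%d %B %Y')
# returns '01 September 2017'.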
def check_extracted_reference(reference, outputformat, min_date, max_date):
'''Test if the extracted reference date can be returned'''
if reference > 0:
dateobject = datetime.datetime.fromtimestamp(reference)
converted = dateobject.strftime(outputformat)
if date_validator(converted, outputformat, earliest=min_date, latest=max_date) is True:
return converted
return None
def get_min_date(min_date):
'''Validates the minimum date and/or defaults to earliest plausible date'''
if min_date is not None:
try:
# internal conversion from Y-M-D format
min_date = datetime.date(int(min_date[:4]),
int(min_date[5:7]),
int(min_date[8:10]))
except ValueError:
min_date = MIN_DATE
else:
min_date = MIN_DATE
return min_date
def get_max_date(max_date):
'''Validates the maximum date and/or defaults to latest plausible date'''
if max_date is not None:
try:
# internal conversion from Y-M-D format
max_date = datetime.date(int(max_date[:4]),
int(max_date[5:7]),
int(max_date[8:10]))
except ValueError:
max_date = LATEST_POSSIBLE
else:
max_date = LATEST_POSSIBLE
return max_date
| gpl-3.0 | -6,857,535,308,415,212,000 | 36.640212 | 103 | 0.606129 | false |
openqt/algorithms | extras/kaprekar_number.py | 1 | 1328 | # coding=utf-8
"""
Kaprekar numbers
http://group.jobbole.com/26887/

A number such as 45 is called a Kaprekar number: 45 * 45 = 2025 and
20 + 25 = 45, so 45 is a Kaprekar number.

Standard definition: if the square of a positive integer X, written in
base N, can be split into two parts whose sum equals X, then X is a
Kaprekar number in base N.

Both parts must be positive integers. For example, 10 * 10 = 100 and
10 + 0 = 10, but 0 is not a positive integer, so 10 is not a Kaprekar
number.

Given a range [a, b] (b >= a, a >= 0), output every Kaprekar number in
that range.

Sample:
Input: 2 100
Output: 9 45 55 99
"""
from __future__ import print_function
def is_kaprekar(n):
level, sq = 10, n * n
while level < sq:
a, b = divmod(sq, level)
if b > 0 and a + b == n:
return level
level *= 10
return 0
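# Worked example: is_kaprekar(45) squares 45 to 2025; at level=100,
# divmod(2025, 100) == (20, 25) and 20 + 25 == 45, so the function returns
# 100 (the split point, which is truthy). is_kaprekar(40) finds no such
# split and returns 0.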
def kaprekar_number(start, stop=None):
while True:
if is_kaprekar(start):
yield start
if stop and start >= stop:
break
start += 1
if __name__ == '__main__':
print(is_kaprekar(45))
print(is_kaprekar(40))
print(is_kaprekar(100))
print([i for i in kaprekar_number(2, 1000)])
| gpl-3.0 | -7,367,578,442,279,746,000 | 17.88 | 55 | 0.595339 | false |
google-research/google-research | simulation_research/signal_processing/spherical/spherical_harmonics.py | 1 | 5602 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A library for computing spherical harmonics.
The spherical harmonics are special functions defined on the surface of a
sphere, which are often used to solve partial differential equations in many
scientific applications. A physical field defined on the surface of a sphere can
be written as a linear superposition of the spherical harmonics as the latter
form a complete set of orthogonal basis functions. The set of spherical
harmonics denoted `Y_l^m(θ, φ)` is often called Laplace's spherical
harmonics of degree `l` and order `m` and `θ` and `φ` are colatitude and
longitude, respectively. In addition, the spherical harmonics can be expressed
as `Y_l^m(θ, φ) = P_l^m(θ) \exp(i m φ)`, in which
`P_l^m(θ)` is the associated Legendre function with embedded normalization
constant \sqrt(1 / (4 𝛑)). We refer to the function f(θ, φ) with finite induced
norm as the signal on the sphere, where the colatitude θ ∈ [0, π] and longitude
φ ∈ [0, 2π). The signal on the sphere can be written as a linear superpostiion
of the spherical harmoincs, which form a complete set of orthonormal basis
functions for degree l ≥ 0 and order |m| ≤ l. In this library, θ and φ can be
non-uniformly sampled.
"""
import jax.numpy as jnp
import numpy as np
from simulation_research.signal_processing.spherical import associated_legendre_function
class SphericalHarmonics(object):
"""Computes the spherical harmonics on TPUs."""
def __init__(self,
l_max,
theta,
phi):
"""Constructor.
Args:
l_max: The maximum degree of the associated Legendre function. The degrees
        are `[0, 1, 2, ..., l_max]`. The orders `m` are `[-l_max, -l_max+1,
        ..., 0, 1, ..., l_max]`.
theta: A vector containing the sampling points along the colatitude
dimension. The associated Legendre functions are computed at
`cos(θ)`.
phi: A vector containing the sampling points along the longitude, at which
the Vandermonde matrix is computed.
"""
self.l_max = l_max
self.theta = theta
self._cos_theta = jnp.cos(theta)
self.phi = phi
self._legendre = associated_legendre_function.gen_normalized_legendre(
self.l_max, self._cos_theta)
self._vandermonde = self._gen_vandermonde_mat(self.l_max, self.phi)
def _gen_vandermonde_mat(self, l_max, phi):
"""Generates the Vandermonde matrix exp(i m φ).
The Vandermonde matrix has the first dimension along the degrees of the
spherical harmonics and the second dimension along the longitude.
Args:
l_max: See `init`.
phi: See `init`.
Returns:
A complex matrix.
"""
nonnegative_degrees = jnp.arange(l_max+1)
mat_dim0, mat_dim1 = jnp.meshgrid(nonnegative_degrees, phi, indexing='ij')
num_phi = phi.shape[0]
def vandermonde_fn(mat_dim0, mat_dim1, num_pts):
coeff = 1j / num_pts
return jnp.exp(coeff * jnp.multiply(mat_dim0, mat_dim1))
return vandermonde_fn(mat_dim0, mat_dim1, num_phi)
def harmonics_nonnegative_order(self):
"""Computes the spherical harmonics of nonnegative orders.
Returns:
A 4D complex tensor of shape `(l_max + 1, l_max + 1, num_theta, num_phi)`,
where the dimensions are in the sequence of degree, order, colatitude, and
longitude.
"""
return jnp.einsum('ijk,jl->ijkl', self._legendre, self._vandermonde)
def _gen_mask(self):
"""Generates the mask of (-1)^m, m = [0, 1, ..., l_max]."""
mask = np.empty((self.l_max + 1,))
mask[::2] = 1
mask[1::2] = -1
return jnp.asarray((mask))
def harmonics_nonpositive_order(
self, harmonics_nonnegative_order = None):
"""Computes the spherical harmonics of nonpositive orders.
With normalization, the nonnegative order Associated Legendre functions are
`P_l^{-m}(x) = (−1)^m P_l^m(x)`, which implies that
`Y_l^{-m}(θ, φ) = (−1)^m conjugate(Y_l^m(θ, φ))`.
Args:
harmonics_nonnegative_order: A 4D complex tensor representing the
harmonics of nonnegative orders, the shape of which is
        `(l_max + 1, l_max + 1, num_theta, num_phi)` and the dimensions are in
the sequence of degree, order, colatitude, and longitude.
Returns:
A 4D complex tensor of the same shape as `harmonics_nonnegative_order`
representing the harmonics of nonpositive orders.
"""
if harmonics_nonnegative_order is None:
harmonics_nonnegative_order = self.harmonics_nonnegative_order()
mask = self._gen_mask()
return jnp.einsum(
'j,ijkl->ijkl', mask, jnp.conjugate(harmonics_nonnegative_order))
@property
def associated_legendre_fn(self):
"""Associated Legendre function values.
Returns:
A 3D tensor of shape `(l_max + 1, l_max + 1, num_theta)` containing the
      values of the associated Legendre functions, the dimensions of which are in
the sequence of degree, order, and colatitude.
"""
return self._legendre
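
# Minimal usage sketch (illustrative only; the grid sizes below are arbitrary
# assumptions, not part of the module):
#
#   import jax.numpy as jnp
#   theta = jnp.linspace(0.0, jnp.pi, 11)        # colatitude samples
#   phi = jnp.linspace(0.0, 2.0 * jnp.pi, 21)    # longitude samples
#   sph = SphericalHarmonics(l_max=4, theta=theta, phi=phi)
#   ylm = sph.harmonics_nonnegative_order()      # shape (5, 5, 11, 21)
#   ylm_neg = sph.harmonics_nonpositive_order(ylm)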
| apache-2.0 | -9,107,182,008,301,480,000 | 38.183099 | 88 | 0.687275 | false |
supermurat/hamsi-manager | Bars/ToolsBar.py | 1 | 8838 | # This file is part of HamsiManager.
#
# Copyright (c) 2010 - 2015 Murat Demir <[email protected]>
#
# Hamsi Manager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Hamsi Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HamsiManager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Core import Universals as uni
from Core.MyObjects import *
from Core import ReportBug
import Bars
class ToolsBar(MToolBar):
def __init__(self, _parent):
MToolBar.__init__(self, _parent)
_parent.addToolBar(Mt.TopToolBarArea, self)
self.setWindowTitle(translate("ToolsBar", "Tools"))
self.setObjectName("Tools")
self.clearEmptyDirectories = MAction(MIcon("Images:clearEmptyDirectories.png"),
translate("ToolsBar", "Clear Empty Directories"), self)
self.clearEmptyDirectories.setObjectName("Clear Empty Directories")
self.clearEmptyDirectories.setToolTip(
translate("ToolsBar", "Clears the folder contents based on the criteria set."))
if uni.isActiveDirectoryCover:
self.actCheckIcon = MAction(MIcon("Images:checkIcon.png"),
translate("ToolsBar", "Check Icon"), self)
self.actCheckIcon.setObjectName("Check Icon")
self.actCheckIcon.setToolTip(translate("ToolsBar", "Checks the icon for the folder you are currently in."))
self.actHash = MAction(MIcon("Images:hash.png"),
translate("ToolsBar", "Hash"), self)
self.actHash.setObjectName("Hash")
self.actHash.setToolTip(translate("ToolsBar", "Hash manager"))
self.actPack = MAction(MIcon("Images:pack.png"),
translate("ToolsBar", "Pack"), self)
self.actPack.setObjectName("Pack")
self.actPack.setToolTip(translate("ToolsBar", "Packs the current folder."))
self.actFileTree = MAction(MIcon("Images:fileTree.png"),
translate("ToolsBar", "File Tree"), self)
self.actFileTree.setObjectName("File Tree")
self.actFileTree.setToolTip(translate("ToolsBar", "Get file tree of current folder."))
self.actClear = MAction(MIcon("Images:clear.png"),
translate("ToolsBar", "Clear"), self)
self.actClear.setObjectName("Clear")
self.actClear.setToolTip(translate("ToolsBar", "Clears the current folder."))
self.actTextCorrector = MAction(MIcon("Images:textCorrector.png"),
translate("ToolsBar", "Text Corrector"), self)
self.actTextCorrector.setObjectName("Text Corrector")
self.actTextCorrector.setToolTip(translate("ToolsBar", "Corrects text files."))
self.actRemoveOnlySubFiles = MAction(MIcon("Images:removeOnlySubFiles.png"),
translate("ToolsBar", "Remove Sub Files"), self)
self.actRemoveOnlySubFiles.setObjectName("Remove Sub Files")
self.actRemoveOnlySubFiles.setToolTip(
translate("ToolsBar", "Remove only all sub files.Do not will remove directory and subfolders."))
self.actSearch = MAction(MIcon("Images:search.png"),
translate("ToolsBar", "Search"), self)
self.actSearch.setObjectName("Search")
self.actSearch.setToolTip(translate("ToolsBar", "Special search tool"))
self.actScriptManager = MAction(MIcon("Images:scriptManager.png"),
translate("ToolsBar", "Script Manager"), self)
self.actScriptManager.setObjectName("Script Manager")
self.actScriptManager.setToolTip(translate("ToolsBar", "You can do what you want."))
if uni.getBoolValue("isSaveActions"):
self.actLastActions = MAction(MIcon("Images:lastActions.png"),
translate("ToolsBar", "Show Last Actions"), self)
self.actLastActions.setObjectName("Show Last Actions")
self.actLastActions.setToolTip(translate("ToolsBar", "You can see last actions."))
if uni.isActiveAmarok and uni.getBoolValue("amarokIsUseHost") is False:
self.actAmarokEmbeddedDBConfigurator = MAction(MIcon("Images:amarokEmbeddedDBConfigurator.png"),
translate("ToolsBar",
"Amarok Embedded Database Configurator"), self)
self.actAmarokEmbeddedDBConfigurator.setObjectName("Amarok Embedded Database Configurator")
self.actAmarokEmbeddedDBConfigurator.setToolTip(translate("ToolsBar", "Packs the current folder."))
self.addAction(self.actHash)
self.addAction(self.actPack)
self.addAction(self.actFileTree)
self.addAction(self.actClear)
self.addAction(self.actTextCorrector)
self.addAction(self.actSearch)
self.addAction(self.actScriptManager)
if uni.getBoolValue("isSaveActions"):
self.addAction(self.actLastActions)
if uni.isActiveAmarok and uni.getBoolValue("amarokIsUseHost") is False:
self.addAction(self.actAmarokEmbeddedDBConfigurator)
self.addSeparator()
self.addAction(self.clearEmptyDirectories)
self.addAction(self.actRemoveOnlySubFiles)
if uni.isActiveDirectoryCover:
self.addAction(self.actCheckIcon)
self.setIconSize(MSize(16, 16))
getMainWindow().Menu.mTools = MMenu(translate("MenuBar", "Tools"), self)
getMainWindow().Menu.mTools.setObjectName("Tools")
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actHash))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actPack))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actFileTree))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actClear))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actTextCorrector))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actSearch))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actScriptManager))
if uni.getBoolValue("isSaveActions"):
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actLastActions))
if uni.isActiveAmarok and uni.getBoolValue("amarokIsUseHost") is False:
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actAmarokEmbeddedDBConfigurator))
getMainWindow().Menu.mTools.addSeparator()
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.clearEmptyDirectories))
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actRemoveOnlySubFiles))
if uni.isActiveDirectoryCover:
getMainWindow().Menu.mTools.addAction(Bars.getCopyOfMAction(self.actCheckIcon))
getMainWindow().Menu.insertMenu(getMainWindow().Menu.mSettings.menuAction(), getMainWindow().Menu.mTools)
self.createScriptsMenu(_parent)
MObject.connect(self, SIGNAL("actionTriggered(QAction *)"), Bars.clickedAnAction)
def createScriptsMenu(self, _parent):
getMainWindow().Menu.mScripts = MMenu(translate("MenuBar", "Scripts"), self)
getMainWindow().Menu.mScripts.setObjectName("Scripts")
from Core import Scripts
_parent.scriptList = Scripts.getScriptList()
for scriptName in _parent.scriptList:
actScript = MAction(str(scriptName), getMainWindow().Menu.mScripts)
actScript.setObjectName(str(scriptName))
actScript.setToolTip(str(str(translate("ToolsBar", "Execute \"%s\" Named Script")) % scriptName))
getMainWindow().Menu.mScripts.addAction(actScript)
actScriptManager = MAction(MIcon("Images:scriptManager.png"),
translate("ToolsBar", "Script Manager"), self)
actScriptManager.setObjectName("Script Manager")
actScriptManager.setToolTip(translate("ToolsBar", "You can do what you want."))
getMainWindow().Menu.mScripts.addAction(actScriptManager)
getMainWindow().Menu.insertMenu(getMainWindow().Menu.mSettings.menuAction(), getMainWindow().Menu.mScripts)
| gpl-3.0 | 3,117,259,295,621,827,600 | 60.804196 | 119 | 0.669609 | false |
wangyanxing/Judge-at-fgdsb | judge/python/tests/coin_change_2.py | 1 | 1260 | from common import *
from solution import *
import copy
import sys
import datetime
num_test = 303
true, false = True, False
in_0 = []
in_org_0 = []
in_1 = []
in_org_1 = []
out = []
def load_test():
f = open('judge/tests/coin-change-2.txt', 'r')
global in_0, in_org_0
in_0 = read_int_matrix(f)
in_org_0 = copy.deepcopy(in_0)
global in_1, in_org_1
in_1 = read_int_array(f)
in_org_1 = copy.deepcopy(in_1)
global out
out = read_int_array(f)
    f.close()
def judge():
load_test()
capture_stdout()
start_time = datetime.datetime.now()
for i in range(num_test):
print ('Testing case #' + str(i+1))
answer = count_changes(in_0[i], in_1[i])
if (answer != out[i]):
release_stdout()
out_str = str(i+1) + " / " + str(num_test) + ";"
out_str += str(in_org_0[i])
out_str += ", "
out_str += str(in_org_1[i])
out_str += ";"
out_str += str(answer)
out_str += ";"
out_str += str(out[i])
print(out_str)
return
release_stdout()
delta = datetime.datetime.now() - start_time
runtime = str(int(delta.total_seconds() * 1000))
print('Accepted;' + runtime)
| mit | 7,145,297,501,625,959,000 | 23.705882 | 60 | 0.520635 | false |
tensorflow/ecosystem | data_service/tf_std_data_server.py | 1 | 2000 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run a tf.data service server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.compat.v1.app.flags
flags.DEFINE_integer("port", 0, "Port to listen on")
flags.DEFINE_bool("is_dispatcher", False, "Whether to start a dispatcher (as opposed to a worker server)")

flags.DEFINE_string("dispatcher_address", "", "The address of the dispatcher. This is only needed when starting a worker server.")
flags.DEFINE_string("worker_address", "", "The address of the worker server. This is only needed when starting a worker server.")
FLAGS = flags.FLAGS
def main(unused_argv):
if FLAGS.is_dispatcher:
print("Starting tf.data service dispatcher")
server = tf.data.experimental.service.DispatchServer(
tf.data.experimental.service.DispatcherConfig(
port=FLAGS.port,
protocol="grpc"))
else:
print("Starting tf.data service worker")
server = tf.data.experimental.service.WorkerServer(
tf.data.experimental.service.WorkerConfig(
port=FLAGS.port,
protocol="grpc",
dispatcher_address=FLAGS.dispatcher_address,
worker_address=FLAGS.worker_address))
server.join()
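# Illustrative client-side sketch (not invoked anywhere in this script): a
# training job can route its dataset through the service started above. The
# dispatcher target "grpc://localhost:5050" is an assumed placeholder.
def example_client_dataset(dispatcher_target="grpc://localhost:5050"):
  dataset = tf.data.Dataset.range(10)
  # Hand processing of this dataset off to the tf.data service workers.
  return dataset.apply(
      tf.data.experimental.service.distribute(
          processing_mode="parallel_epochs", service=dispatcher_target))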
if __name__ == "__main__":
tf.compat.v1.app.run()
| apache-2.0 | 4,136,536,005,482,894,300 | 38.215686 | 130 | 0.688 | false |
abramhindle/slowdraw | slowdraw.py | 1 | 5288 | #!/usr/bin/env python
''' Slowdraw watches an image file and makes animations out of the changes
'''
import sys
import cv2
import cv
import numpy as np
import logging
import time
import argparse
import watchdog
import os.path
import pickle
import math
from watchdog.observers import Observer
parser = argparse.ArgumentParser(description='slowdraw')
parser.add_argument('-W', default=1024, help='Width of window')
parser.add_argument('-H', default=768, help='Height of window')
parser.add_argument('-strict', default=False, help='Strictness')
parser.add_argument('path', help='Path of file to watch')
args = parser.parse_args()
full_w = int(args.W)
full_h = int(args.H)
strictness = bool(args.strict)
def new_rgb(width,height):
return np.zeros((height,width,3), np.uint8)
fullscreen_buffer = new_rgb(full_w,full_h)
logging.basicConfig(stream = sys.stderr, level=logging.INFO)
load_queue = []
class ModListener(watchdog.events.FileSystemEventHandler):
def __init__(self, handler):
super(ModListener, self).__init__()
self.queue = []
self.handler = handler;
def on_modified(self, event):
logging.info("Modified: "+event.src_path)
if ((not strictness and
os.path.dirname(args.path) == os.path.dirname(event.src_path))
or event.src_path == args.path):
logging.info( "Recorded Modified: " + event.src_path )
self.queue.append( event.src_path )
self.handler( event.src_path )
window_name = "slowdraw"
fullscreen = False
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN | cv2.WINDOW_OPENGL)
def start_fullscreen():
global fullscreen
global window_name
if not fullscreen:
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
fullscreen = True
else:
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, 0)
fullscreen = False
frame1 = cv2.imread(args.path)
w,h,_ = frame1.shape
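# Note: frame1.shape is (rows, cols, channels), so despite the names w holds
# the image height and h the width; the later VideoWriter call passes (h, w),
# i.e. (width, height), which is what OpenCV expects.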
frames = [frame1]
curr_frame = 0
done = False
def handle_frame(fname):
if (len(fname) > 4 and fname[-4:] == ".png"):
newframe = cv2.imread(fname)
frames.append(newframe)
mod_listener = ModListener(handle_frame)
observer = Observer()
directory = os.path.dirname(args.path)
observer.schedule(mod_listener, directory, recursive=True)
observer.start()
maxtime = 1000/2
mintime = 1000/30
# 2 4 8 16 32 64 128 256 512
maxtimes = [2000,2000,2000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]
mintimes = [1000,1000,1000, 1000, 500, 200, 100, 50, 50, 50]
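# get_times() picks a (max, min) display-time pair from the tables above,
# indexed by ceil(log2(number of captured frames)); scalexp() then maps a
# frame's relative position onto that range with an exponential ease, so old
# frames flash by near the minimum time while recent frames are held longer.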
def get_times(nframes):
index = int(math.ceil(math.log(nframes) / math.log(2)))
if index >= len(maxtimes):
return maxtimes[-1], mintimes[-1]
else:
return maxtimes[index], mintimes[index]
def scalexp(v,mint,maxt,scale=5):
mine = math.exp(1.0)/math.exp(scale)
maxe = 1.0
vs = math.exp(1 + (scale-1)*v)/math.exp(scale)
vs = (vs - mine)/(maxe - mine)
return vs * (maxt - mint) + mint
def linscale(v,mint,maxt):
return v*(maxt-mint) + mint
def maintain_aspect(maxx,maxy,x,y):
wr = maxx/float(x)
hr = maxy/float(y)
if hr*y <= maxy or hr*x <= maxx:
return (int(hr*x),int(hr*y))
else:
return (int(wr*x),int(wr*y))
# maintain_aspect(1024,768,640,480)==(1024,768)
# maintain_aspect(1024,768,608,472)==(989,768)
# maintain_aspect(1024,768,random.randint(1,1324),random.randint(1,1324))
fourcc = cv2.cv.FOURCC(*'XVID')
writer = cv2.VideoWriter("slowdraw.avi",fourcc,30,(h,w),1)
frametime = 1000.0/30.0
resized_frame = None
fs_offset_x = 0
fs_offset_y = 0
cv2.imshow('slowdraw', fullscreen_buffer )
try:
while not done:
framen = curr_frame % len(frames)
frame = frames[curr_frame % len(frames)]
#if resized_frame == None:
# (lh,lw,depth) = frame.shape
# ratio = float(full_h)/float(lh)
# (resized_w,resized_h) = maintain_aspect(full_w,full_h,lw,lh)
# resized_frame = new_rgb(resized_w,resized_h)
# fs_offset_x = (full_w - resized_w)/2
# fs_offset_y = (full_h - resized_h)/2
# print "%s %s %s %s" % (resized_w,resized_h,fs_offset_x, fs_offset_y)
#resized_frame[:,:] = cv2.resize(frame,(resized_w,resized_h))
#fullscreen_buffer[fs_offset_y:fs_offset_y+resized_h , fs_offset_x:fs_offset_x+resized_w] = resized_frame
cv2.imshow('slowdraw', frame )
#print "%s,%s,%s" % fullscreen_buffer.shape
#cv2.imshow('slowdraw', fullscreen_buffer )
tmaxtime, tmintime = get_times(len(frames))
wait = scalexp( (framen + 1.0) / len(frames) , tmintime,tmaxtime)
print(wait,tmaxtime,tmintime)
curr_frame += 1
for i in range(0,max(1,int(wait/frametime))):
# print("Writing frame %s %s %s" % (i,wait,wait/frametime))
writer.write(frame)
# TODO: fix the wait time
k = cv2.waitKey(int(wait)) & 0xff
if k == 27:
done = True
continue
if k == ord('f'):
start_fullscreen()
except KeyboardInterrupt:
observer.stop()
# pickle.dump(frames,file('slowdraw.pkl','wb'))
writer.release()
observer.stop()
observer.join()
| gpl-3.0 | 8,090,950,691,469,998,000 | 28.707865 | 114 | 0.628404 | false |
rhelmer/socorro-lib | socorro/unittest/external/postgresql/test_backfill.py | 1 | 12534 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .unittestbase import PostgreSQLTestCase
from nose.plugins.attrib import attr
from nose.tools import eq_, assert_raises
import datetime
from socorro.external.postgresql.backfill import Backfill
from socorro.external.postgresql import staticdata, fakedata
from socorro.external import MissingArgumentError
from socorro.lib import datetimeutil
#==============================================================================
@attr(integration='postgres')
class TestBackfill(PostgreSQLTestCase):
"""Tests the calling of all backfill functions"""
#--------------------------------------------------------------------------
def setUp(self):
""" Populate tables with fake data """
super(TestBackfill, self).setUp()
cursor = self.connection.cursor()
self.tables = []
for table in staticdata.tables + fakedata.tables:
# staticdata has no concept of duration
if table.__module__ == 'socorro.external.postgresql.staticdata':
table = table()
else:
table = table(days=1)
table.releases = {
'WaterWolf': {
'channels': {
'Nightly': {
'versions': [{
'number': '18.0',
'probability': 0.5,
'buildid': '%s000020'
}],
'adu': '10',
'repository': 'nightly',
'throttle': '1',
'update_channel': 'nightly',
},
},
'crashes_per_hour': '5',
'guid': '{[email protected]}'
},
'B2G': {
'channels': {
'Nightly': {
'versions': [{
'number': '18.0',
'probability': 0.5,
'buildid': '%s000020'
}],
'adu': '10',
'repository': 'nightly',
'throttle': '1',
'update_channel': 'nightly',
},
},
'crashes_per_hour': '5',
'guid': '{[email protected]}'
}
}
table_name = table.table
table_columns = table.columns
values = str(tuple(["%(" + i + ")s" for i in table_columns]))
columns = str(tuple(table_columns))
self.tables.append(table_name)
# TODO: backfill_reports_clean() sometimes tries to insert a
# os_version_id that already exists
            if table_name != "os_versions":
for rows in table.generate_rows():
data = dict(zip(table_columns, rows))
query = "INSERT INTO %(table)s " % {'table': table_name}
query = query + columns.replace("'", "").replace(",)", ")")
query = query + " VALUES "
query = query + values.replace(",)", ")").replace("'", "")
cursor.execute(query, data)
self.connection.commit()
#--------------------------------------------------------------------------
def tearDown(self):
""" Cleanup the database, delete tables and functions """
cursor = self.connection.cursor()
tables = str(self.tables).replace("[", "").replace("]", "")
cursor.execute("TRUNCATE " + tables.replace("'", "") + " CASCADE;")
self.connection.commit()
self.connection.close()
super(TestBackfill, self).tearDown()
#--------------------------------------------------------------------------
def setup_data(self):
self.now = datetimeutil.utc_now()
now = self.now.date()
yesterday = now - datetime.timedelta(days=1)
lastweek = now - datetime.timedelta(days=7)
now_str = datetimeutil.date_to_string(now)
yesterday_str = datetimeutil.date_to_string(yesterday)
lastweek_str = datetimeutil.date_to_string(lastweek)
self.test_source_data = {
# Test backfill_adu
'adu': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_all_dups
'all_dups': {
'params': {
"start_date": yesterday_str,
"end_date": now_str,
},
'res_expected': [(True,)],
},
# Test backfill_build_adu
'build_adu': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_correlations
'correlations': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_crashes_by_user_build
'crashes_by_user_build': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_crashes_by_user
'crashes_by_user': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# TODO: Test backfill_daily_crashes tries to insert into a table
# that do not exists. It can be fixed by creating a temporary one.
#'daily_crashes': {
# 'params': {
# "update_day": now_str,
# },
# 'res_expected': [(True,)],
# },
# Test backfill_exploitability
'exploitability': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_explosiveness
'explosiveness': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_home_page_graph_build
'home_page_graph_build': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_home_page_graph
'home_page_graph': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_matviews
'matviews': {
'params': {
"start_date": yesterday_str,
"reports_clean": 'false',
},
'res_expected': [(True,)],
},
# Test backfill_nightly_builds
'nightly_builds': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_rank_compare
'rank_compare': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_reports_clean
'reports_clean': {
'params': {
"start_date": yesterday_str,
"end_date": now_str,
},
'res_expected': [(True,)],
},
# TODO: Test backfill_reports_duplicates tries to insert into a
# table that do not exists. It can be fixed by using the update
# function inside of the backfill.
#'reports_duplicates': {
# 'params': {
# "start_date": yesterday_str,
# "end_date": now_str,
# },
# 'res_expected': [(True,)],
# },
# TODO: Test backfill_signature_counts tries to insert into
# tables and to update functions that does not exist.
#'signature_counts': {
# 'params': {
# "start_date": yesterday_str,
# "end_date": now_str,
# },
# 'res_expected': [(True,)],
# },
# Test backfill_tcbs_build
'tcbs_build': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_tcbs
'tcbs': {
'params': {
"update_day": yesterday_str,
},
'res_expected': [(True,)],
},
# Test backfill_weekly_report_partitions
'weekly_report_partitions': {
'params': {
"start_date": lastweek_str,
"end_date": now_str,
"table_name": 'raw_crashes',
},
'res_expected': [(True,)],
},
# TODO: Update Backfill to support signature_summary backfill
# through the API
#'signature_summary_products': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_installations': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_uptime': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_os': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_process_type': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_architecture': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_flash_version': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_device': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
#'signature_summary_graphics': {
# 'params': {
# "update_day": yesterday_str,
# },
# 'res_expected': [(True,)],
#},
}
#--------------------------------------------------------------------------
def test_get(self):
backfill = Backfill(config=self.config)
#......................................................................
# Test raise error if kind of backfill is not passed
params = {"backfill_type": ''}
assert_raises(MissingArgumentError, backfill.get, **params)
#......................................................................
# Test all the backfill functions
self.setup_data()
for test, data in self.test_source_data.items():
data['params']['backfill_type'] = str(test)
res = backfill.get(**data['params'])
eq_(res[0], data['res_expected'][0])
| mpl-2.0 | 5,271,359,420,006,023,000 | 35.017241 | 79 | 0.390618 | false |
kylewray/nova | python/nova/nova_pomdp.py | 1 | 6240 | """ The MIT License (MIT)
Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import ctypes as ct
import platform
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))))
import nova_pomdp_alpha_vectors as npav
import pomdp_alpha_vectors as pav
# Check if we need to create the nova variable. If so, import the correct library
# file depending on the platform.
#try:
# _nova
#except NameError:
_nova = None
if platform.system() == "Windows":
_nova = ct.CDLL(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..", "..", "lib", "libnova.dll"))
else:
_nova = ct.CDLL(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..", "..", "lib", "libnova.so"))
class NovaPOMDP(ct.Structure):
""" The C struct POMDP object. """
_fields_ = [("n", ct.c_uint),
("ns", ct.c_uint),
("m", ct.c_uint),
("z", ct.c_uint),
("r", ct.c_uint),
("rz", ct.c_uint),
("gamma", ct.c_float),
("horizon", ct.c_uint),
("S", ct.POINTER(ct.c_int)),
("T", ct.POINTER(ct.c_float)),
("O", ct.POINTER(ct.c_float)),
("R", ct.POINTER(ct.c_float)),
("Z", ct.POINTER(ct.c_int)),
("B", ct.POINTER(ct.c_float)),
("d_S", ct.POINTER(ct.c_int)),
("d_T", ct.POINTER(ct.c_float)),
("d_O", ct.POINTER(ct.c_float)),
("d_R", ct.POINTER(ct.c_float)),
("d_Z", ct.POINTER(ct.c_int)),
("d_B", ct.POINTER(ct.c_float)),
]
# Functions from 'pomdp_model_cpu.h'.
_nova.pomdp_initialize_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # n
ct.c_uint, # ns
ct.c_uint, # m
ct.c_uint, # z
ct.c_uint, # r
ct.c_uint, # rz
ct.c_float, # gamma
ct.c_uint) # horizon
_nova.pomdp_belief_update_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.POINTER(ct.c_float), # b
ct.c_uint, # a
ct.c_uint, # o
ct.POINTER(ct.POINTER(ct.c_float))) # bp
_nova.pomdp_add_new_raw_beliefs_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # numBeliefPointsToAdd
ct.POINTER(ct.c_float)) # Bnew
_nova.pomdp_uninitialize_cpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
# Functions from 'pomdp_expand_cpu.h'.
_nova.pomdp_expand_random_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint) # numBeliefsToAdd
_nova.pomdp_expand_distinct_beliefs_cpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_expand_pema_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.POINTER(pav.POMDPAlphaVectors)) # policy
# Functions from 'pomdp_sigma_cpu.h'.
_nova.pomdp_sigma_cpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # numDesiredNonZeroValues
ct.POINTER(ct.c_float)) # sigma
# Functions from 'pomdp_model_gpu.h'.
_nova.pomdp_initialize_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_successors_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_successors_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_state_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_state_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_observation_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_observation_transitions_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_rewards_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_rewards_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_nonzero_beliefs_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_nonzero_beliefs_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_initialize_belief_points_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
_nova.pomdp_uninitialize_belief_points_gpu.argtypes = tuple([ct.POINTER(NovaPOMDP)])
# Functions from 'pomdp_expand_gpu.h'.
_nova.pomdp_expand_random_gpu.argtypes = (ct.POINTER(NovaPOMDP),
ct.c_uint, # numThreads
ct.c_uint) # numBeliefsToAdd
| mit | 2,637,093,861,809,474,000 | 47.372093 | 98 | 0.565705 | false |
RyanDJLee/pyta | tests/test_type_inference/test_listcomp.py | 1 | 2140 | import astroid
import nose
from hypothesis import settings, given, HealthCheck
from typing import List
import tests.custom_hypothesis_support as cs
settings.load_profile("pyta")
@given(cs.homogeneous_iterable)
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_list_comprehension_single_target_name_homogeneous_iterable(iterable):
"""Test Comprehension node visitor representing a comprehension expression with a single target and a
name expression over a homogeneous list."""
program = f'[num for num in {repr(iterable)}]'
module, typeinferrer = cs._parse_text(program)
listcomp_node = list(module.nodes_of_class(astroid.ListComp))[0]
expected_type = List[listcomp_node.generators[0].iter.inf_type.getValue().__args__[0]]
assert listcomp_node.inf_type.getValue() == expected_type
@given(cs.heterogeneous_iterable)
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_list_comprehension_single_target_name_heterogeneous_iterable(iterable):
"""Test Comprehension node visitor representing a comprehension expression with a single target and a
name expression over a heterogeneous list."""
program = f'[num for num in {repr(iterable)}]'
module, typeinferrer = cs._parse_text(program)
listcomp_node = list(module.nodes_of_class(astroid.ListComp))[0]
expected_type = List[listcomp_node.generators[0].iter.inf_type.getValue().__args__[0]]
assert listcomp_node.inf_type.getValue() == expected_type
@given(cs.valid_identifier(min_size=1))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_list_comprehension_single_target_name_string(iterable):
"""Test Comprehension node visitor representing a comprehension expression with a single target and a
name expression over a string."""
program = f'[num for num in {repr(iterable)}]'
module, typeinferrer = cs._parse_text(program)
listcomp_node = list(module.nodes_of_class(astroid.ListComp))[0]
expected_type = List[listcomp_node.generators[0].iter.inf_type.getValue()]
assert listcomp_node.inf_type.getValue() == expected_type
if __name__ == '__main__':
nose.main()
| gpl-3.0 | 229,308,136,317,296,860 | 45.521739 | 105 | 0.744393 | false |
googleads/googleads-python-lib | examples/ad_manager/v202011/team_service/update_teams.py | 1 | 2271 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates teams by changing its description.
To determine which teams exist, run get_all_teams.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
TEAM_ID = 'INSERT_TEAM_ID_HERE'
def main(client, team_id):
# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v202011')
# Create a filter statement to select a single team by ID.
statement = (ad_manager.StatementBuilder(version='v202011')
.Where('id = :teamId')
.WithBindVariable('teamId', int(team_id)))
# Get teams by statement.
response = team_service.getTeamsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
updated_teams = []
# Update each local team object by changing its description.
for team in response['results']:
team['description'] = 'this team is great!'
updated_teams.append(team)
# Update teams on the server.
teams = team_service.updateTeams(updated_teams)
# Display results.
for team in teams:
print('Team with id "%s" and name "%s" was updated.'
% (team['id'], team['name']))
else:
print('No teams found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, TEAM_ID)
| apache-2.0 | -2,277,132,433,655,368,400 | 32.397059 | 77 | 0.712902 | false |
starius/wt-classes | examples/make-all.py | 1 | 1140 | #!/usr/bin/python
import sys
import re
from optparse import OptionParser
entrypoints = []
anchors = []
parser = OptionParser()
parser.add_option("--cpp", dest="cpp")
parser.add_option("--template", dest="template")
parser.add_option("--wrasterimage", dest="wrasterimage", action="store_true")
(options, args) = parser.parse_args()
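# The script writes one combined source file to stdout: the regex below strips
# each example's main(), and the template is then filled with one entry point
# and one anchor per example.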
remove_main = re.compile("int main.+\}", re.DOTALL)
for cpp in options.cpp.split():
if not cpp.endswith('all.cpp'):
sys.stdout.write(remove_main.sub("", open(cpp).read()))
low = re.split(r'[/\\]', cpp)[-1].split('.')[0]
if not options.wrasterimage and low == 'captcha':
continue
Cap = re.search(r"create([^\s]+)App", open(cpp).read()).groups()[0]
args = {'low': low, 'Cap': Cap}
entrypoints.append('''
addEntryPoint(Wt::Application, create%(Cap)sApp, "/%(low)s");
''' % args)
anchors.append('''
new WAnchor("%(low)s", "%(Cap)s", root());
new WBreak(root());
''' % args)
sys.stdout.write(open(options.template).read() %
{'entrypoints': ''.join(entrypoints), 'anchors': ''.join(anchors)})
| gpl-2.0 | 7,167,563,787,169,136,000 | 31.571429 | 77 | 0.592105 | false |
SMTorg/smt | smt/applications/mfk.py | 1 | 27540 | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <[email protected]>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of
order 1 (AR1)
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
from copy import deepcopy
import numpy as np
from scipy.linalg import solve_triangular
from scipy import linalg
from scipy.spatial.distance import cdist
from packaging import version
from sklearn import __version__ as sklversion
if version.parse(sklversion) < version.parse("0.22"):
from sklearn.cross_decomposition.pls_ import PLSRegression as pls
else:
from sklearn.cross_decomposition import PLSRegression as pls
from smt.surrogate_models.krg_based import KrgBased
from smt.sampling_methods import LHS
from smt.utils.kriging_utils import (
cross_distances,
componentwise_distance,
standardization,
differences,
)
class NestedLHS(object):
def __init__(self, nlevel, xlimits, random_state=None):
"""
Constructor where values of options can be passed in.
Parameters
----------
nlevel : integer.
The number of design of experiments to be built
xlimits : ndarray
The interval of the domain in each dimension with shape (nx, 2)
random_state : Numpy RandomState object or seed number which controls random draws
"""
self.nlevel = nlevel
self.xlimits = xlimits
self.random_state = random_state
def __call__(self, nb_samples_hifi):
"""
Builds nlevel nested design of experiments of dimension dim and size n_samples.
Each doe sis built with the optmized lhs procedure.
Builds the highest level first; nested properties are ensured by deleting
the nearest neighbours in lower levels of fidelity.
Parameters
----------
nb_samples_hifi: The number of samples of the highest fidelity model.
nb_samples_fi(n-1) = 2 * nb_samples_fi(n)
Returns
        -------
        list of length nlevel of design of experiments from low to high fidelity level.
"""
nt = []
for i in range(self.nlevel, 0, -1):
nt.append(pow(2, i - 1) * nb_samples_hifi)
if len(nt) != self.nlevel:
raise ValueError("nt must be a list of nlevel elements")
        if not np.allclose(np.sort(nt)[::-1], nt):
raise ValueError("nt must be a list of decreasing integers")
doe = []
p0 = LHS(xlimits=self.xlimits, criterion="ese", random_state=self.random_state)
doe.append(p0(nt[0]))
for i in range(1, self.nlevel):
p = LHS(
xlimits=self.xlimits, criterion="ese", random_state=self.random_state
)
doe.append(p(nt[i]))
for i in range(1, self.nlevel)[::-1]:
ind = []
d = cdist(doe[i], doe[i - 1], "euclidean")
for j in range(doe[i].shape[0]):
dj = np.sort(d[j, :])
k = dj[0]
l = (np.where(d[j, :] == k))[0][0]
m = 0
while l in ind:
m = m + 1
k = dj[m]
l = (np.where(d[j, :] == k))[0][0]
ind.append(l)
doe[i - 1] = np.delete(doe[i - 1], ind, axis=0)
doe[i - 1] = np.vstack((doe[i - 1], doe[i]))
return doe
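# A minimal usage sketch for NestedLHS (illustrative only): assumes a 2-D
# domain and 10 high-fidelity samples, so the low-fidelity level receives
# 20 nested points.
def _nested_lhs_example():
    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
    xdoes = NestedLHS(nlevel=2, xlimits=xlimits, random_state=0)
    x_lf, x_hf = xdoes(10)
    # x_lf has shape (20, 2), x_hf has shape (10, 2), and every row of x_hf
    # also appears in x_lf (the nesting property).
    return x_lf, x_hf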
class MFK(KrgBased):
def _initialize(self):
super(MFK, self)._initialize()
declare = self.options.declare
declare(
"rho_regr",
"constant",
values=("constant", "linear", "quadratic"),
desc="Regression function type for rho",
)
declare(
"optim_var",
False,
types=bool,
values=(True, False),
desc="If True, the variance at HF samples is forced to zero",
)
declare(
"propagate_uncertainty",
True,
types=bool,
values=(True, False),
desc="If True, the variance cotribution of lower fidelity levels are considered",
)
self.name = "MFK"
def _differences(self, X, Y):
"""
Compute the distances
"""
return differences(X, Y)
def _check_list_structure(self, X, y):
"""
checks if the data structure is compatible with MFK.
sets class attributes such as (number of levels of Fidelity, training points in each level, ...)
Arguments :
X : list of arrays, each array corresponds to a fidelity level. starts from lowest to highest
y : same as X
"""
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
n_samples = np.zeros(nlevel, dtype=int)
n_features = np.zeros(nlevel, dtype=int)
n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
if i > 1 and n_features[i] != n_features[i - 1]:
raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.nx = n_features[0]
self.nt_all = n_samples
self.nlvl = nlevel
self.ny = y[0].shape[1]
self.X = X[:]
self.y = y[:]
def _new_train(self):
"""
Overrides KrgBased implementation
Trains the Multi-Fidelity model
"""
self._new_train_init()
theta0 = self.options["theta0"].copy()
noise0 = self.options["noise0"].copy()
for lvl in range(self.nlvl):
self._new_train_iteration(lvl)
self.options["theta0"] = theta0
self.options["noise0"] = noise0
self._new_train_finalize(lvl)
def _new_train_init(self):
if self.name in ["MFKPLS", "MFKPLSK"]:
_pls = pls(self.options["n_comp"])
# As of sklearn 0.24.1 PLS with zeroed outputs raises an exception while sklearn 0.23 returns zeroed x_rotations
# For now the try/except below is a workaround to restore the 0.23 behaviour
try:
# PLS is done on the highest fidelity identified by the key None
self.m_pls = _pls.fit(
self.training_points[None][0][0].copy(),
self.training_points[None][0][1].copy(),
)
self.coeff_pls = self.m_pls.x_rotations_
except StopIteration:
                self.coeff_pls = np.zeros(
                    (self.training_points[None][0][0].shape[1], self.options["n_comp"])
                )
xt = []
yt = []
i = 0
while self.training_points.get(i, None) is not None:
xt.append(self.training_points[i][0][0])
yt.append(self.training_points[i][0][1])
i = i + 1
xt.append(self.training_points[None][0][0])
yt.append(self.training_points[None][0][1])
self._check_list_structure(xt, yt)
self._check_param()
X = self.X
y = self.y
_, _, self.X_offset, self.y_mean, self.X_scale, self.y_std = standardization(
np.concatenate(xt, axis=0), np.concatenate(yt, axis=0)
)
nlevel = self.nlvl
# initialize lists
self.optimal_noise_all = nlevel * [0]
self.D_all = nlevel * [0]
self.F_all = nlevel * [0]
self.p_all = nlevel * [0]
self.q_all = nlevel * [0]
self.optimal_rlf_value = nlevel * [0]
self.optimal_par = nlevel * [{}]
self.optimal_theta = nlevel * [0]
self.X_norma_all = [(x - self.X_offset) / self.X_scale for x in X]
self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
def _new_train_iteration(self, lvl):
n_samples = self.nt_all
self.options["noise0"] = np.array([self.options["noise0"][lvl]]).flatten()
self.options["theta0"] = self.options["theta0"][lvl, :]
self.X_norma = self.X_norma_all[lvl]
self.y_norma = self.y_norma_all[lvl]
if self.options["eval_noise"]:
if self.options["use_het_noise"]:
# hetGP works with unique design variables
(
self.X_norma,
self.index_unique, # do we need to store it?
self.nt_reps, # do we need to store it?
) = np.unique(
self.X_norma, return_inverse=True, return_counts=True, axis=0
)
self.nt_all[lvl] = self.X_norma.shape[0]
# computing the mean of the output per unique design variable (see Binois et al., 2018)
y_norma_unique = []
for i in range(self.nt_all[lvl]):
y_norma_unique.append(np.mean(self.y_norma[self.index_unique == i]))
y_norma_unique = np.array(y_norma_unique).reshape(-1, 1)
# pointwise sensible estimates of the noise variances (see Ankenman et al., 2010)
self.optimal_noise = self.options["noise0"] * np.ones(self.nt_all[lvl])
for i in range(self.nt_all[lvl]):
diff = self.y_norma[self.index_unique == i] - y_norma_unique[i]
if np.sum(diff ** 2) != 0.0:
self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
self.optimal_noise = self.optimal_noise / self.nt_reps
self.optimal_noise_all[lvl] = self.optimal_noise
self.y_norma = y_norma_unique
self.X_norma_all[lvl] = self.X_norma
self.y_norma_all[lvl] = self.y_norma
else:
self.optimal_noise = self.options["noise0"] / self.y_std ** 2
self.optimal_noise_all[lvl] = self.optimal_noise
# Calculate matrix of distances D between samples
self.D_all[lvl] = cross_distances(self.X_norma)
# Regression matrix and parameters
self.F_all[lvl] = self._regression_types[self.options["poly"]](self.X_norma)
self.p_all[lvl] = self.F_all[lvl].shape[1]
# Concatenate the autoregressive part for levels > 0
if lvl > 0:
F_rho = self._regression_types[self.options["rho_regr"]](self.X_norma)
self.q_all[lvl] = F_rho.shape[1]
self.F_all[lvl] = np.hstack(
(
F_rho
* np.dot(
self._predict_intermediate_values(
self.X_norma, lvl, descale=False
),
np.ones((1, self.q_all[lvl])),
),
self.F_all[lvl],
)
)
else:
self.q_all[lvl] = 0
n_samples_F_i = self.F_all[lvl].shape[0]
if n_samples_F_i != n_samples[lvl]:
raise Exception(
"Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model."
)
if int(self.p_all[lvl] + self.q_all[lvl]) >= n_samples_F_i:
raise Exception(
(
"Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the regression"
" model size p+q=%d."
)
% (n_samples_F_i, self.p_all[lvl] + self.q_all[lvl])
)
# Determine Gaussian Process model parameters
self.F = self.F_all[lvl]
D, self.ij = self.D_all[lvl]
self._lvl = lvl
self.nt = self.nt_all[lvl]
self.q = self.q_all[lvl]
self.p = self.p_all[lvl]
(
self.optimal_rlf_value[lvl],
self.optimal_par[lvl],
self.optimal_theta[lvl],
) = self._optimize_hyperparam(D)
if self.options["eval_noise"] and not self.options["use_het_noise"]:
tmp_list = self.optimal_theta[lvl]
self.optimal_theta[lvl] = tmp_list[:-1]
self.optimal_noise = tmp_list[-1]
self.optimal_noise_all[lvl] = self.optimal_noise
del self.y_norma, self.D, self.optimal_noise
def _new_train_finalize(self, lvl):
if self.options["eval_noise"] and self.options["optim_var"]:
X = self.X
for lvl in range(self.nlvl - 1):
self.set_training_values(
X[lvl], self._predict_intermediate_values(X[lvl], lvl + 1), name=lvl
)
self.set_training_values(
X[-1], self._predict_intermediate_values(X[-1], self.nlvl)
)
self.options["eval_noise"] = False
self._new_train()
def _componentwise_distance(self, dx, opt=0):
d = componentwise_distance(dx, self.options["corr"], self.nx)
return d
def _predict_intermediate_values(self, X, lvl, descale=True):
"""
Evaluates the model at a set of points.
Used for training the model at level lvl.
Allows to relax the order problem.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
lvl : level at which the prediction is made
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
n_eval, _ = X.shape
# if n_features_X != self.n_features:
# raise ValueError("Design must be an array of n_features columns.")
# Calculate kriging mean and variance at level 0
mu = np.zeros((n_eval, lvl))
if descale:
X = (X - self.X_offset) / self.X_scale
f = self._regression_types[self.options["poly"]](X)
f0 = self._regression_types[self.options["poly"]](X)
dx = self._differences(X, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
beta = self.optimal_par[0]["beta"]
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
gamma = self.optimal_par[0]["gamma"]
# Scaled predictor
mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
# Calculate recursively kriging mean and variance at level i
for i in range(1, lvl):
g = self._regression_types[self.options["rho_regr"]](X)
dx = self._differences(X, Y=self.X_norma_all[i])
d = self._componentwise_distance(dx)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[i], d
).reshape(n_eval, self.nt_all[i])
f = np.vstack((g.T * mu[:, i - 1], f0.T))
beta = self.optimal_par[i]["beta"]
gamma = self.optimal_par[i]["gamma"]
# scaled predictor
mu[:, i] = (np.dot(f.T, beta) + np.dot(r_, gamma)).ravel()
# scaled predictor
if descale:
mu = mu * self.y_std + self.y_mean
return mu[:, -1].reshape((n_eval, 1))
def _predict_values(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self._predict_intermediate_values(X, self.nlvl)
def _predict_variances(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
        s2 : np.ndarray
            Variances of the output variable at the evaluation points
"""
return self.predict_variances_all_levels(X)[0][:, -1]
def predict_variances_all_levels(self, X):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
        MSE : np.ndarray
            Variances at the evaluation points, one column per fidelity level
        sigma2_rhos : list
            The sigma2_rho terms used in the recursive variance computation
"""
# Initialization X = atleast_2d(X)
nlevel = self.nlvl
sigma2_rhos = []
n_eval, n_features_X = X.shape
# if n_features_X != self.n_features:
# raise ValueError("Design must be an array of n_features columns.")
X = (X - self.X_offset) / self.X_scale
# Calculate kriging mean and variance at level 0
mu = np.zeros((n_eval, nlevel))
f = self._regression_types[self.options["poly"]](X)
f0 = self._regression_types[self.options["poly"]](X)
dx = self._differences(X, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
# Get regression function and correlation
F = self.F_all[0]
C = self.optimal_par[0]["C"]
beta = self.optimal_par[0]["beta"]
Ft = solve_triangular(C, F, lower=True)
# yt = solve_triangular(C, self.y_norma_all[0], lower=True)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
gamma = self.optimal_par[0]["gamma"]
# Scaled predictor
mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
self.sigma2_rho = nlevel * [None]
MSE = np.zeros((n_eval, nlevel))
r_t = solve_triangular(C, r_.T, lower=True)
G = self.optimal_par[0]["G"]
u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
sigma2 = self.optimal_par[0]["sigma2"] / self.y_std ** 2
MSE[:, 0] = sigma2 * (
# 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
1
- (r_t ** 2).sum(axis=0)
+ (u_ ** 2).sum(axis=0)
)
# Calculate recursively kriging variance at level i
for i in range(1, nlevel):
F = self.F_all[i]
C = self.optimal_par[i]["C"]
g = self._regression_types[self.options["rho_regr"]](X)
dx = self._differences(X, Y=self.X_norma_all[i])
d = self._componentwise_distance(dx)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[i], d
).reshape(n_eval, self.nt_all[i])
f = np.vstack((g.T * mu[:, i - 1], f0.T))
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y_norma_all[i], lower=True)
r_t = solve_triangular(C, r_.T, lower=True)
G = self.optimal_par[i]["G"]
beta = self.optimal_par[i]["beta"]
# scaled predictor
sigma2 = self.optimal_par[i]["sigma2"] / self.y_std ** 2
q = self.q_all[i]
u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
sigma2_rho = np.dot(
g,
sigma2 * linalg.inv(np.dot(G.T, G))[:q, :q]
+ np.dot(beta[:q], beta[:q].T),
)
sigma2_rho = (sigma2_rho * g).sum(axis=1)
sigma2_rhos.append(sigma2_rho)
if self.name in ["MFKPLS", "MFKPLSK"]:
p = self.p_all[i]
Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
MSE[:, i] = (
# sigma2_rho * MSE[:, i - 1]
+Q_ / (2 * (self.nt_all[i] - p - q))
# * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
* (1 - (r_t ** 2).sum(axis=0))
+ sigma2 * (u_ ** 2).sum(axis=0)
)
else:
MSE[:, i] = sigma2 * (
# 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
1
- (r_t ** 2).sum(axis=0)
+ (u_ ** 2).sum(axis=0)
) # + sigma2_rho * MSE[:, i - 1]
if self.options["propagate_uncertainty"]:
MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
# scaled predictor
MSE *= self.y_std ** 2
return MSE, sigma2_rhos
def _predict_derivatives(self, x, kx):
"""
Evaluates the derivatives at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
kx : int
The 0-based index of the input variable with respect to which derivatives are desired.
Returns
-------
        y : np.ndarray
            Derivative values, scaled by self.y_std / self.X_scale[kx].
"""
lvl = self.nlvl
# Initialization
n_eval, n_features_x = x.shape
x = (x - self.X_offset) / self.X_scale
dy_dx = np.zeros((n_eval, lvl))
if self.options["corr"] != "squar_exp":
raise ValueError(
"The derivative is only available for square exponential kernel"
)
if self.options["poly"] == "constant":
df = np.zeros([n_eval, 1])
elif self.options["poly"] == "linear":
df = np.zeros((n_eval, self.nx + 1))
df[:, 1:] = 1
else:
raise ValueError(
"The derivative is only available for ordinary kriging or "
+ "universal kriging using a linear trend"
)
df0 = deepcopy(df)
if self.options["rho_regr"] != "constant":
raise ValueError(
"The derivative is only available for regression rho constant"
)
# Get pairwise componentwise L1-distances to the input training set
dx = self._differences(x, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
# Compute the correlation function
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
# Beta and gamma = R^-1(y-FBeta)
beta = self.optimal_par[0]["beta"]
gamma = self.optimal_par[0]["gamma"]
df_dx = np.dot(df, beta)
d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[0][:, kx].reshape(
(1, self.nt_all[0])
)
theta = self._get_theta(0)
dy_dx[:, 0] = np.ravel((df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma)))
# Calculate recursively derivative at level i
for i in range(1, lvl):
g = self._regression_types[self.options["rho_regr"]](x)
dx = self._differences(x, Y=self.X_norma_all[i])
d = self._componentwise_distance(dx)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[i], d
).reshape(n_eval, self.nt_all[i])
df = np.vstack((g.T * dy_dx[:, i - 1], df0.T))
beta = self.optimal_par[i]["beta"]
gamma = self.optimal_par[i]["gamma"]
df_dx = np.dot(df.T, beta)
d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[i][:, kx].reshape(
(1, self.nt_all[i])
)
theta = self._get_theta(i)
# scaled predictor
dy_dx[:, i] = np.ravel(df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma))
return dy_dx[:, -1] * self.y_std / self.X_scale[kx]
def _get_theta(self, i):
return self.optimal_theta[i]
def _check_param(self):
"""
Overrides KrgBased implementation
This function checks some parameters of the model.
"""
if self.name in ["MFKPLS", "MFKPLSK"]:
d = self.options["n_comp"]
else:
d = self.nx
if self.options["corr"] == "act_exp":
raise ValueError("act_exp correlation function must be used with MGP")
if self.name in ["MFKPLS"]:
if self.options["corr"] not in ["squar_exp", "abs_exp"]:
raise ValueError(
"MFKPLS only works with a squared exponential or an absolute exponential kernel"
)
elif self.name in ["MFKPLSK"]:
if self.options["corr"] not in ["squar_exp"]:
raise ValueError(
"MFKPLSK only works with a squared exponential kernel (until we prove the contrary)"
)
if isinstance(self.options["theta0"], np.ndarray):
if self.options["theta0"].shape != (self.nlvl, d):
raise ValueError(
"the dimensions of theta0 %s should coincide to the number of dim %s"
% (self.options["theta0"].shape, (self.nlvl, d))
)
else:
if len(self.options["theta0"]) != d:
if len(self.options["theta0"]) == 1:
self.options["theta0"] *= np.ones((self.nlvl, d))
elif len(self.options["theta0"]) == self.nlvl:
self.options["theta0"] = np.array(self.options["theta0"]).reshape(
-1, 1
)
self.options["theta0"] *= np.ones((1, d))
else:
raise ValueError(
"the length of theta0 (%s) should be equal to the number of dim (%s) or levels of fidelity (%s)."
% (len(self.options["theta0"]), d, self.nlvl)
)
else:
self.options["theta0"] *= np.ones((self.nlvl, 1))
if len(self.options["noise0"]) != self.nlvl:
if len(self.options["noise0"]) == 1:
self.options["noise0"] = self.nlvl * [self.options["noise0"]]
else:
raise ValueError(
"the length of noise0 (%s) should be equal to the number of levels of fidelity (%s)."
% (len(self.options["noise0"]), self.nlvl)
)
for i in range(self.nlvl):
if self.options["use_het_noise"]:
if len(self.X[i]) == len(np.unique(self.X[i])):
if len(self.options["noise0"][i]) != self.nt_all[i]:
if len(self.options["noise0"][i]) == 1:
self.options["noise0"][i] *= np.ones(self.nt_all[i])
else:
raise ValueError(
"for the level of fidelity %s, the length of noise0 (%s) should be equal to the number of observations (%s)."
% (i, len(self.options["noise0"][i]), self.nt_all[i])
)
else:
if len(self.options["noise0"][i]) != 1:
raise ValueError(
"for the level of fidelity %s, the length of noise0 (%s) should be equal to one."
% (i, len(self.options["noise0"][i]))
)
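# A minimal two-fidelity usage sketch (illustrative only; the toy functions
# and sample sizes are assumptions, not part of the library).
def _mfk_usage_example():
    xlimits = np.array([[0.0, 1.0]])
    x_lf, x_hf = NestedLHS(nlevel=2, xlimits=xlimits, random_state=0)(5)
    y_lf = 0.5 * np.sin(8.0 * x_lf) + 0.1  # cheap, biased approximation
    y_hf = np.sin(8.0 * x_hf)  # expensive reference
    sm = MFK(theta0=x_lf.shape[1] * [1.0])
    sm.set_training_values(x_lf, y_lf, name=0)  # lowest fidelity level
    sm.set_training_values(x_hf, y_hf)  # highest fidelity (default name)
    sm.train()
    x_new = np.linspace(0.0, 1.0, 11).reshape(-1, 1)
    return sm.predict_values(x_new), sm.predict_variances(x_new)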
| bsd-3-clause | 8,541,336,273,020,419,000 | 35.769025 | 141 | 0.510022 | false |
EricRahm/log-spam-hell | logspam/bisect.py | 1 | 10157 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import Counter
from logspam import WARNING_RE
from logspam.cli import BaseCommandLineArgs
from logspam.logs import retrieve_test_logs
from mozregression.bisector import (
Bisector, Bisection, NightlyHandler, IntegrationHandler)
from mozregression.dates import parse_date
from mozregression.errors import DateFormatError
from mozregression.fetch_build_info import IntegrationInfoFetcher
from mozregression.fetch_configs import create_config
from mozregression.json_pushes import JsonPushes
from mozregression.log import init_logger
from mozregression.test_runner import TestRunner
import re
class WarningBisector(object):
def __init__(self, good, bad, platform, warning,
warning_limit, warning_re, ignore_lines,
required_test):
init_logger()
self.use_nightly = True
try:
self.good = parse_date(good)
self.bad = parse_date(bad)
except DateFormatError:
# This hopefully a revision range. We can bypass nightly and
# go directly to InboundHandler. That itself is a bit of a misnomer,
# it will still bisect m-c builds, but by changeset range, not date
# range.
self.use_nightly = False
self.good = good
self.bad = bad
self.ignore_lines = ignore_lines
self.test_runner = WarningTestRunner(
warning, platform,
ignore_lines=ignore_lines,
warning_re=warning_re,
warning_limit=warning_limit,
required_test=required_test)
# Convert the platform to a mozregression friendly version.
# Also avoid overwriting the os module by *not* using |os| for a
# variable name.
(_os, bits) = re.match(r'([a-zA-Z]+)-?([0-9]+)?', platform).groups()
        if not bits or bits not in ('32', '64'):
bits = 32
# windows7-32
# windows7-32-vm
# win32
# win64
if '64' in platform:
bits = 64
if _os.startswith('win'):
_os = 'win'
print("_os = %s bits = %s" % (_os, bits))
# TODO(ER): We might be able to ditch this.
self.fetch_config = create_config('firefox', _os, int(bits))
# Hardcode to m-c for now.
self.fetch_config.set_repo('mozilla-central')
self.fetch_config.set_build_type('debug')
class FakeDownloadManager:
def focus_download(self, foo):
pass
dm = FakeDownloadManager()
self.bisector = Bisector(self.fetch_config, self.test_runner, dm, False, None)
def bisect(self):
if self.use_nightly:
result = self.bisect_nightly()
else:
result = self.bisect_inbound(self.good, self.bad)
(good, bad) = result
if self.test_runner.check_for_move(self.fetch_config.repo, good):
print("You should probably try bisecting again from the good revision")
print("Done bisecting I guess")
return result
def bisect_nightly(self):
handler = NightlyHandler(ensure_good_and_bad=True)
result = self.bisector.bisect(handler, self.good, self.bad)
if result == Bisection.FINISHED:
print("Got as far as we can go bisecting nightlies...")
handler.print_range()
print("Switching bisection method to taskcluster")
result = self.bisect_inbound(handler.good_revision, handler.bad_revision)
else:
# TODO(ER): maybe this should be an exception...
result = (None, None)
return result
def bisect_inbound(self, good_rev, bad_rev):
# Remember, InboundHandler is just a changeset based bisector. It will
# still potentially bisect m-c first.
handler = InboundHandler()
result = self.bisector.bisect(handler, good_rev, bad_rev, expand=0)
if result == Bisection.FINISHED:
print("No more m-c revisions :(")
handler.print_range()
# Try switching over to the integration branch.
if len(handler.build_range) == 2:
result = handler.handle_merge()
if result:
branch, good_rev, bad_rev = result
self.fetch_config.set_repo(branch)
return self.bisect_inbound(good_rev, bad_rev)
return (handler.good_revision, handler.bad_revision)
class BisectCommandLineArgs(BaseCommandLineArgs):
@staticmethod
def do_bisect(args):
print("do_bisect called")
print(args)
bisector = WarningBisector(args.good, args.bad, args.platform,
args.warning, args.warning_limit,
args.warning_re, args.ignore_lines,
args.required_test)
# TODO(ER): Get the pushlog for bad, check for the file the warning is
# in in the changeset.
(good, bad) = bisector.bisect()
def add_command(self, p):
parser = p.add_parser('bisect',
help='Attempts to find the changeset that introduced a given '
'warning through bisection.')
self.add_arguments(parser)
parser.set_defaults(func=BisectCommandLineArgs.do_bisect)
def add_arguments(self, p):
# TODO(ER): add a date/revision parser
p.add_argument('good', action='store', default=None,
help='Last known good date. Will be validated.')
p.add_argument('bad', action='store', default=None,
help='Last known bad date.')
p.add_argument('warning', nargs='?',
help='The text of a warning you want the full details of.')
super(BisectCommandLineArgs, self).add_arguments(p)
p.add_argument('--ignore-lines', action='store_true', default=False,
help='Ignore line numbers when bisecting warnings. Useful if' \
' the line number of the warning has changed. Not so ' \
'useful if there are a lot of similar warnings in the ' \
'file.')
p.add_argument('--warning-limit', action='store', type=int, default=1000,
help='The threshold of warnings for going from good to ' \
'bad. Default: 1000.')
p.add_argument('--required-test', action='store', default=None,
help='Test that must be present to compare revisions')
class WarningTestRunner(TestRunner):
"""
TestRunner to use in conjunction with bisection.
"""
def __init__(self, warning, platform='linux64', ignore_lines=False,
warning_re=WARNING_RE, warning_limit=1000,
required_test=None):
TestRunner.__init__(self)
self.warning = warning
self.warning_re = warning_re
self.platform = platform
self.ignore_lines = ignore_lines
self.warning_limit = warning_limit
self.required_test = required_test or ""
def check_for_move(self, repo, changeset):
"""
Checks if the warning has moved lines but still exists.
"""
if self.ignore_lines:
return False
files = retrieve_test_logs(
repo, changeset[:12],
self.platform, warning_re=self.warning_re)
combined_warnings = Counter()
for log in files:
if log:
combined_warnings.update(log.warnings)
possible_move_found = False
normalized = re.match(r'^(.*), line [0-9]+$', self.warning).group(1)
for (k, v) in combined_warnings.items():
if k.startswith(normalized) and v > self.warning_limit:
print("Possible line move:\n %d - %s" % (v, k))
possible_move_found = True
if possible_move_found:
jp = JsonPushes(repo)
push = jp.push(changeset)
print("Try this date: %s" % push.utc_date)
return possible_move_found
def evaluate(self, build_info, allow_back=False):
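        # Verdicts follow mozregression's TestRunner convention:
        # 'g' -> good build, 'b' -> bad build, 's' -> skip this build.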
files = retrieve_test_logs(
build_info.repo_name, build_info.changeset[:12],
self.platform, warning_re=self.warning_re)
# Somewhat arbitrary, but we need to make sure there are enough tests
# run in order to make a reasonable evaluation of the amount of
# warnings present.
if not files or len(files) < 20:
# Tell the bisector to skip this build.
print("Skipping build %s, not enough tests run" % build_info.changeset[:12])
return 's'
combined_warnings = Counter()
found_test = False
for log in files:
if log:
combined_warnings.update(log.warnings)
if not found_test:
found_test = self.required_test in log.job_name
if self.ignore_lines:
normalized = re.match(r'^(.*), line [0-9]+$', self.warning).group(1)
total = 0
for (k, v) in combined_warnings.items():
if k.startswith(normalized):
total += v
print("%d - %s" % (total, normalized))
else:
total = combined_warnings[self.warning]
print("%d - %s" % (total, self.warning))
if not found_test:
print("Skipping build %s, required test %s was not run" % (
build_info.changeset[:12], self.required_test))
return 's'
if total > self.warning_limit:
print("%d > %d" % (total, self.warning_limit))
return 'b'
else:
print("%d <= %d" % (total, self.warning_limit))
return 'g'
def run_once(self, build_info):
return 0 if self.evaluate(build_info) == 'g' else 1
| mpl-2.0 | 8,093,395,756,673,294,000 | 37.184211 | 88 | 0.57773 | false |
mrrrgn/build-mozharness | configs/b2g_bumper/v2.1s.py | 1 | 3817 | #!/usr/bin/env python
config = {
"exes": {
# Get around the https warnings
"hg": ['/usr/local/bin/hg', "--config", "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt"],
"hgtool.py": ["/usr/local/bin/hgtool.py"],
"gittool.py": ["/usr/local/bin/gittool.py"],
},
'gecko_pull_url': 'https://hg.mozilla.org/releases/mozilla-b2g34_v2_1s/',
'gecko_push_url': 'ssh://hg.mozilla.org/releases/mozilla-b2g34_v2_1s/',
'gecko_local_dir': 'mozilla-b2g34_v2_1s',
'git_ref_cache': '/builds/b2g_bumper/git_ref_cache.json',
'manifests_repo': 'https://git.mozilla.org/b2g/b2g-manifest.git',
'manifests_revision': 'origin/v2.1s',
'hg_user': 'B2G Bumper Bot <[email protected]>',
"ssh_key": "~/.ssh/ffxbld_rsa",
"ssh_user": "ffxbld",
'hgtool_base_bundle_urls': ['https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/bundles'],
'gaia_repo_url': 'https://hg.mozilla.org/integration/gaia-2_1s',
'gaia_revision_file': 'b2g/config/gaia.json',
'gaia_max_revisions': 5,
# Which git branch this hg repo corresponds to
'gaia_git_branch': 'v2.1s',
'gaia_mapper_project': 'gaia',
'mapper_url': 'http://cruncher.build.mozilla.org/mapper/{project}/{vcs}/{rev}',
'devices': {
'dolphin': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'dolphin-512': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-kk': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-jb': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-ics': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'emulator.xml',
},
# Equivalent to emulator-ics - see bug 916134
# Remove once the above bug resolved
'emulator': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'emulator.xml',
},
'flame': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'flame-kk': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'nexus-4': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
},
'repo_remote_mappings': {
'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
'http://sprdsource.spreadtrum.com:8085/b2g/android': 'https://git.mozilla.org/external/sprd-aosp',
'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
'git://github.com/t2m-foxfone/': 'https://git.mozilla.org/external/t2m-foxfone',
# Some mappings to ourself, we want to leave these as-is!
'https://git.mozilla.org/external/aosp': 'https://git.mozilla.org/external/aosp',
'https://git.mozilla.org/external/caf': 'https://git.mozilla.org/external/caf',
'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
'https://git.mozilla.org/external/apitrace': 'https://git.mozilla.org/external/apitrace',
'https://git.mozilla.org/external/t2m-foxfone': 'https://git.mozilla.org/external/t2m-foxfone',
},
}
| mpl-2.0 | 9,035,164,040,511,455,000 | 41.411111 | 106 | 0.563008 | false |
KarrLab/obj_model | tests/fixtures/migrate/wc_lang_fixture/wc_lang/transform/split_reversible_reactions.py | 1 | 5346 | """ Transform models.
:Author: Jonathan Karr <[email protected]>
:Date: 2018-06-19
:Copyright: 2018, Karr Lab
:License: MIT
"""
from .core import Transform
from wc_lang import Model, Reaction, RateLawDirection
from wc_onto import onto
from wc_utils.util.ontology import are_terms_equivalent
import copy
import re
class SplitReversibleReactionsTransform(Transform):
""" Split reversible reactions in non-dFBA submodels into separate forward and backward reactions """
class Meta(object):
id = 'SplitReversibleReactions'
label = 'Split reversible reactions into separate forward and backward reactions'
def run(self, model):
""" Split reversible reactions in non-dFBA submodels into separate forward and backward reactions
Args:
model (:obj:`Model`): model definition
Returns:
:obj:`Model`: same model definition, but with reversible reactions split into separate forward and backward reactions
"""
for submodel in model.submodels:
if not are_terms_equivalent(submodel.framework, onto['WC:dynamic_flux_balance_analysis']):
for rxn in list(submodel.reactions):
if rxn.reversible:
# remove reversible reaction
model.reactions.remove(rxn)
submodel.reactions.remove(rxn)
# create separate forward and reverse reactions
rxn_for = submodel.reactions.create(
model=model,
id='{}_forward'.format(rxn.id),
name='{} (forward)'.format(rxn.name),
reversible=False,
evidence=rxn.evidence,
conclusions=rxn.conclusions,
identifiers=rxn.identifiers,
comments=rxn.comments,
references=rxn.references,
)
rxn_bck = submodel.reactions.create(
model=model,
id='{}_backward'.format(rxn.id),
name='{} (backward)'.format(rxn.name),
reversible=False,
evidence=rxn.evidence,
conclusions=rxn.conclusions,
identifiers=rxn.identifiers,
comments=rxn.comments,
references=rxn.references,
)
rxn.evidence = []
rxn.conclusions = []
rxn.identifiers = []
rxn.references = []
# copy participants and negate for backward reaction
for part in rxn.participants:
rxn_for.participants.append(part)
part_back = part.species.species_coefficients.get_one(coefficient=-1 * part.coefficient)
if part_back:
rxn_bck.participants.append(part_back)
else:
rxn_bck.participants.create(species=part.species, coefficient=-1 * part.coefficient)
rxn.participants = []
# copy rate laws
law_for = rxn.rate_laws.get_one(direction=RateLawDirection.forward)
law_bck = rxn.rate_laws.get_one(direction=RateLawDirection.backward)
if law_for:
law_for.reaction = rxn_for
law_for.direction = RateLawDirection.forward
law_for.id = law_for.gen_id()
if law_bck:
law_bck.reaction = rxn_bck
law_bck.direction = RateLawDirection.forward
law_bck.id = law_bck.gen_id()
# copy dFBA objective: unreachable because only non-dFBA reactions are split
if rxn.dfba_obj_expression:
dfba_obj_expr = rxn.dfba_obj_expression # pragma: no cover
parsed_expr = dfba_obj_expr._parsed_expression # pragma: no cover
dfba_obj_expr.expression = parsed_expr.expression = re.sub(
r'\b' + rxn.id + r'\b',
'({} - {})'.format(rxn_for.id, rxn_bck.id),
dfba_obj_expr.expression) # pragma: no cover
parsed_expr._objs[Reaction].pop(rxn.id) # pragma: no cover
parsed_expr._objs[Reaction][rxn_for.id] = rxn_for # pragma: no cover
parsed_expr._objs[Reaction][rxn_bck.id] = rxn_bck # pragma: no cover
parsed_expr.tokenize() # pragma: no cover
rxn.dfba_obj_expression = None # pragma: no cover
rxn_for.dfba_obj_expression = dfba_obj_expr # pragma: no cover
rxn_bck.dfba_obj_expression = dfba_obj_expr # pragma: no cover
return model
| mit | -2,379,198,555,471,933,400 | 45.894737 | 129 | 0.491957 | false |
guardicore/monkey | monkey/tests/unit_tests/infection_monkey/system_info/windows_cred_collector/test_pypykatz_handler.py | 1 | 5948 | from unittest import TestCase
from infection_monkey.system_info.windows_cred_collector.pypykatz_handler import (
_get_creds_from_pypykatz_session,
)
class TestPypykatzHandler(TestCase):
# Made up credentials, but structure of dict should be roughly the same
PYPYKATZ_SESSION = {
"authentication_id": 555555,
"session_id": 3,
"username": "Monkey",
"domainname": "ReAlDoMaIn",
"logon_server": "ReAlDoMaIn",
"logon_time": "2020-06-02T04:53:45.256562+00:00",
"sid": "S-1-6-25-260123139-3611579848-5589493929-3021",
"luid": 123086,
"msv_creds": [
{
"username": "monkey",
"domainname": "ReAlDoMaIn",
"NThash": b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
"LMHash": None,
"SHAHash": b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
}
],
"wdigest_creds": [
{
"credtype": "wdigest",
"username": "monkey",
"domainname": "ReAlDoMaIn",
"password": "canyoufindme",
"luid": 123086,
}
],
"ssp_creds": [
{
"credtype": "wdigest",
"username": "monkey123",
"domainname": "ReAlDoMaIn",
"password": "canyoufindme123",
"luid": 123086,
}
],
"livessp_creds": [
{
"credtype": "wdigest",
"username": "monk3y",
"domainname": "ReAlDoMaIn",
"password": "canyoufindm3",
"luid": 123086,
}
],
"dpapi_creds": [
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b7294"
"7f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b729"
"47f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72"
"947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b729"
"47f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{"credtype": "dpapi", "key_guid": "9123-123ae123de4-121239-3123-421f"},
],
"kerberos_creds": [
{
"credtype": "kerberos",
"username": "monkey_kerb",
"password": None,
"domainname": "ReAlDoMaIn",
"luid": 123086,
"tickets": [],
}
],
"credman_creds": [
{
"credtype": "credman",
"username": "monkey",
"domainname": "monkey.ad.monkey.com",
"password": "canyoufindme2",
"luid": 123086,
},
{
"credtype": "credman",
"username": "[email protected]",
"domainname": "moneky.monkey.com",
"password": "canyoufindme1",
"luid": 123086,
},
{
"credtype": "credman",
"username": "test",
"domainname": "test.test.ts",
"password": "canyoufindit",
"luid": 123086,
},
],
"tspkg_creds": [],
}
def test__get_creds_from_pypykatz_session(self):
results = _get_creds_from_pypykatz_session(TestPypykatzHandler.PYPYKATZ_SESSION)
test_dicts = [
{
"username": "monkey",
"ntlm_hash": "31b73c59d7e0c089c031d6cfe0d16ae9",
"password": "",
"lm_hash": "",
},
{"username": "monkey", "ntlm_hash": "", "password": "canyoufindme", "lm_hash": ""},
{
"username": "monkey123",
"ntlm_hash": "",
"password": "canyoufindme123",
"lm_hash": "",
},
{"username": "monk3y", "ntlm_hash": "", "password": "canyoufindm3", "lm_hash": ""},
{"username": "monkey", "ntlm_hash": "", "password": "canyoufindme2", "lm_hash": ""},
{
"username": "[email protected]",
"ntlm_hash": "",
"password": "canyoufindme1",
"lm_hash": "",
},
{"username": "test", "ntlm_hash": "", "password": "canyoufindit", "lm_hash": ""},
]
results = [result.to_dict() for result in results]
[self.assertTrue(test_dict in results) for test_dict in test_dicts]
| gpl-3.0 | -484,856,487,669,444,350 | 36.64557 | 96 | 0.47495 | false |
PaddlePaddle/models | dygraph/mobilenet/imagenet_dataset.py | 1 | 1987 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import math
import random
import numpy as np
from paddle.vision.datasets import DatasetFolder
from paddle.vision.transforms import transforms
from paddle import fluid
class ImageNetDataset(DatasetFolder):
def __init__(self,
path,
mode='train',
image_size=224,
resize_short_size=256):
super(ImageNetDataset, self).__init__(path)
self.mode = mode
normalize = transforms.Normalize(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
if self.mode == 'train':
self.transform = transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.Transpose(order=(2, 0, 1)), normalize
])
else:
self.transform = transforms.Compose([
transforms.Resize(resize_short_size),
transforms.CenterCrop(image_size),
transforms.Transpose(order=(2, 0, 1)), normalize
])
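    # Illustrative usage (added; the path is hypothetical): wrap the dataset in a
    # DataLoader for batching, e.g.
    #   train_ds = ImageNetDataset("/path/to/imagenet/train", mode="train")
    #   loader = paddle.io.DataLoader(train_ds, batch_size=64, shuffle=True)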
def __getitem__(self, idx):
img_path, label = self.samples[idx]
img = cv2.imread(img_path).astype(np.float32)
label = np.array([label]).astype(np.int64)
return self.transform(img), label
def __len__(self):
return len(self.samples)
| apache-2.0 | 260,625,212,117,591,070 | 33.859649 | 74 | 0.636135 | false |
lillisgary/shiny-shame | theme/admin.py | 1 | 1252 | from django.contrib import admin
from .models import HomePage, Slide, IconBlurb, Portfolio, PortfolioItemImage, PortfolioItem, PortfolioItemCategory, TextSlider, DocumentListItem, DocumentList, DocumentListItemCategory
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
class SlideInline(TabularDynamicInlineAdmin):
model = Slide
class IconBlurbInline(TabularDynamicInlineAdmin):
model = IconBlurb
class PortfolioItemImageInline(TabularDynamicInlineAdmin):
model = PortfolioItemImage
class TextSliderInline(TabularDynamicInlineAdmin):
model = TextSlider
class HomePageAdmin(PageAdmin):
inlines = (SlideInline, IconBlurbInline, TextSliderInline,)
class PortfolioItemAdmin(PageAdmin):
inlines = (PortfolioItemImageInline,)
class DocumentListItemInline(TabularDynamicInlineAdmin):
model = DocumentListItem
class DocumentListAdmin(PageAdmin):
inlines = (DocumentListItemInline,)
admin.site.register(HomePage, HomePageAdmin)
admin.site.register(Portfolio, PageAdmin)
admin.site.register(PortfolioItem, PortfolioItemAdmin)
admin.site.register(PortfolioItemCategory)
admin.site.register(DocumentList, DocumentListAdmin)
admin.site.register(DocumentListItemCategory)
| gpl-2.0 | 8,358,480,571,464,116,000 | 34.771429 | 185 | 0.835463 | false |
sjdv1982/seamless | tests/lowlevel/module-package.py | 1 | 1611 | import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, macro
mod_init = """
from .mod3 import testvalue
"""
mod1 = """
from . import testvalue
def func():
return testvalue
"""
mod2 = """
from .mod1 import func
"""
mod3 = """
testvalue = 42
"""
package = {
"__init__": {
"language": "python",
"code": mod_init,
"dependencies": [".mod3"],
},
"mod1": {
"language": "python",
"code": mod1,
"dependencies": ["__init__"],
},
"mod2": {
"language": "python",
"code": mod2,
"dependencies": [".mod1"],
},
"mod3": {
"language": "python",
"code": mod3,
"dependencies": [],
},
}
testmodule = {
"type": "interpreted",
"language": "python",
"code": package,
}
with macro_mode_on():
ctx = context(toplevel=True)
ctx.param = cell("plain").set(1)
ctx.macro = macro({
"param": "plain",
"testmodule": ("plain", "module"),
})
ctx.param.connect(ctx.macro.param)
ctx.macro_code = cell("macro").set("""
print("macro execute")
from .testmodule import testvalue
from .testmodule.mod1 import func
from .testmodule.mod2 import func as func2
print(testvalue)
print(func is func2)
print(func2())
print(testmodule.testvalue)
from .testmodule import mod3
print(mod3.testvalue)
print("/macro execute")
""")
ctx.macro_code.connect(ctx.macro.code)
ctx.testmodule = cell("plain").set(testmodule)
ctx.testmodule.connect(ctx.macro.testmodule)
print("START")
ctx.compute()
print(ctx.macro.exception)
| mit | -7,188,232,584,140,256,000 | 18.409639 | 50 | 0.591558 | false |
twz915/django | django/core/serializers/json.py | 1 | 3709 | """
Serialize data to/from JSON
"""
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.duration import duration_iso_string
from django.utils.functional import Promise
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def _init_options(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
self.options.update({'use_decimal': False})
self._current = None
self.json_kwargs = self.options.copy()
self.json_kwargs.pop('stream', None)
self.json_kwargs.pop('fields', None)
if self.options.get('indent'):
# Prevent trailing spaces
self.json_kwargs['separators'] = (',', ': ')
self.json_kwargs.setdefault('cls', DjangoJSONEncoder)
def start_serialization(self):
self._init_options()
self.stream.write("[")
def end_serialization(self):
if self.options.get("indent"):
self.stream.write("\n")
self.stream.write("]")
if self.options.get("indent"):
self.stream.write("\n")
def end_object(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
json.dump(self.get_dump_object(obj), self.stream, **self.json_kwargs)
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if not isinstance(stream_or_string, (bytes, str)):
stream_or_string = stream_or_string.read()
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
try:
objects = json.loads(stream_or_string)
for obj in PythonDeserializer(objects, **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, datetime.timedelta):
return duration_iso_string(o)
elif isinstance(o, (decimal.Decimal, uuid.UUID, Promise)):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
| bsd-3-clause | -3,236,497,983,382,932,500 | 31.535088 | 85 | 0.60151 | false |
hyperized/ansible | lib/ansible/modules/network/ios/ios_lacp.py | 1 | 4046 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for ios_lacp
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: ios_lacp
version_added: 2.9
short_description: Manage Global Link Aggregation Control Protocol (LACP) on Cisco IOS devices.
description: This module provides declarative management of Global LACP on Cisco IOS network devices.
author: Sumit Jaiswal (@justjais)
notes:
- Tested against Cisco IOSv Version 15.2 on VIRL
- This module works with connection C(network_cli),
See L(IOS Platform Options,../network/user_guide/platform_ios.html).
options:
config:
description: The provided configurations.
type: dict
suboptions:
system:
description: This option sets the default system parameters for LACP.
type: dict
suboptions:
priority:
description:
- LACP priority for the system.
- Refer to vendor documentation for valid values.
type: int
required: True
state:
description:
- The state of the configuration after module completion
type: str
choices:
- merged
- replaced
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
#
# Before state:
# -------------
#
# vios#show lacp sys-id
# 32768, 5e00.0000.8000
- name: Merge provided configuration with device configuration
ios_lacp:
config:
system:
priority: 123
state: merged
# After state:
# ------------
#
# vios#show lacp sys-id
# 123, 5e00.0000.8000
# Using replaced
#
# Before state:
# -------------
#
# vios#show lacp sys-id
# 500, 5e00.0000.8000
- name: Replaces Global LACP configuration
ios_lacp:
config:
system:
priority: 123
state: replaced
# After state:
# ------------
#
# vios#show lacp sys-id
# 123, 5e00.0000.8000
# Using Deleted
#
# Before state:
# -------------
#
# vios#show lacp sys-id
# 500, 5e00.0000.8000
- name: Delete Global LACP attribute
ios_lacp:
state: deleted
# After state:
# -------------
#
# vios#show lacp sys-id
# 32768, 5e00.0000.8000
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['lacp system-priority 10']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.argspec.lacp.lacp import LacpArgs
from ansible.module_utils.network.ios.config.lacp.lacp import Lacp
def main():
"""
Main entry point for module execution
:returns: the result form module invocation
"""
module = AnsibleModule(argument_spec=LacpArgs.argument_spec,
supports_check_mode=True)
result = Lacp(module).execute_module()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,138,347,326,964,049,400 | 21.353591 | 101 | 0.636678 | false |
ramusus/django-vkontakte-groups-migration | vkontakte_groups_migration/migrations/0002_auto__add_field_groupmigration_hidden.py | 1 | 16399 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupMigration.hidden'
db.add_column('vkontakte_groups_groupstatmembers', 'hidden',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GroupMigration.hidden'
db.delete_column('vkontakte_groups_groupstatmembers', 'hidden')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vkontakte_groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['vkontakte_users.User']", 'symmetrical': 'False'})
},
'vkontakte_groups_migration.groupmigration': {
'Meta': {'ordering': "('group', 'time', '-id')", 'unique_together': "(('group', 'time'),)", 'object_name': 'GroupMigration', 'db_table': "'vkontakte_groups_groupstatmembers'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'migrations'", 'to': "orm['vkontakte_groups.Group']"}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.TextField', [], {}),
'members_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_deactivated_entered': ('django.db.models.fields.TextField', [], {}),
'members_deactivated_entered_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_deactivated_entered_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_deactivated_left': ('django.db.models.fields.TextField', [], {}),
'members_deactivated_left_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_deactivated_left_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_entered': ('django.db.models.fields.TextField', [], {}),
'members_entered_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_entered_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_has_avatar_entered': ('django.db.models.fields.TextField', [], {}),
'members_has_avatar_entered_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_has_avatar_entered_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_has_avatar_left': ('django.db.models.fields.TextField', [], {}),
'members_has_avatar_left_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_has_avatar_left_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_left': ('django.db.models.fields.TextField', [], {}),
'members_left_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_left_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'offset': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'vkontakte_places.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': "orm['vkontakte_places.Country']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_places.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_users.user': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {}),
'activity': ('django.db.models.fields.TextField', [], {}),
'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'books': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': "orm['vkontakte_users.User']"}),
'games': ('django.db.models.fields.TextField', [], {}),
'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.TextField', [], {}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'movies': ('django.db.models.fields.TextField', [], {}),
'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'tv': ('django.db.models.fields.TextField', [], {}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'vkontakte_wall.comment': {
'Meta': {'ordering': "['post', '-date']", 'object_name': 'Comment'},
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'from_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_comments'", 'to': "orm['vkontakte_wall.Post']"}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'reply_for_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Comment']", 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_comments'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'vkontakte_wall.post': {
'Meta': {'ordering': "['wall_owner_id', '-date']", 'object_name': 'Post'},
'attachments': ('django.db.models.fields.TextField', [], {}),
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_posts'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'copy_owner_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'copy_post_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'copy_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geo': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'like_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'media': ('django.db.models.fields.TextField', [], {}),
'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'post_source': ('django.db.models.fields.TextField', [], {}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'repost_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'repost_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'reposts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_posts'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['vkontakte_groups_migration'] | bsd-3-clause | 2,832,130,740,180,638,000 | 83.536082 | 198 | 0.565217 | false |
spaceboats/busbus | tests/test_provider_ctran.py | 1 | 1025 | import busbus
from busbus.provider.ctran import CTranProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def ctran_provider(engine):
responses.add(responses.GET, CTranProvider.gtfs_url,
body=mock_gtfs_zip('ctran'), status=200,
content_type='application/zip')
return CTranProvider(engine)
# test that we are indeed using our local abridged copy of the GTFS feed
def test_len_routes(ctran_provider):
assert len(list(ctran_provider.routes)) == 28
@pytest.mark.parametrize('stop_id,count', [
(u'2058', 4)
])
def test_43_to_eaton_hall(ctran_provider, stop_id, count):
stop = ctran_provider.get(busbus.Stop, stop_id)
route = ctran_provider.get(busbus.Route, u'46')
assert len(list(ctran_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-05:00'),
end_time=arrow.get('2015-03-10T16:00:00-05:00')))) == count
| mit | 6,004,438,207,539,431,000 | 30.060606 | 72 | 0.694634 | false |
nexec/vkcopy2mp3p | vkcopy2mp3p.py | 1 | 4785 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import sqlite3 as db
import sys
import os
import pycurl
import StringIO
import re
import urllib
import json
from random import shuffle
PROFILE = 'default'
argc = len(sys.argv)
if argc < 3 or argc > 4:
sys.stderr.write('Usage: %s /path/to/dir count_of_songs [PROFILE]\n'%sys.argv[0])
sys.exit(1)
PATH_TO_SAVE=sys.argv[1]
count_of_songs = int(sys.argv[2])
if argc==4:
print "update PROFILE"
PROFILE=sys.argv[3]
#sys.exit(0)
# find needed profile dir and cookiesdb from it
cookiedbpath = os.environ['HOME']+'/.mozilla/firefox/'
for name in os.listdir(cookiedbpath):
if os.path.isdir(cookiedbpath+name) and (PROFILE in name):
cookiedbpath=cookiedbpath+name+'/cookies.sqlite'
break
what = '.vk.com'
addhash = 'undef'
connection = db.connect(cookiedbpath)
cursor = connection.cursor()
contents = "name, value"
cursor.execute("SELECT " +contents+ " FROM moz_cookies WHERE host='" +what+ "'")
cookiemas=[]
for row in cursor.fetchall():
cookiemas.append(row[0]+'='+row[1])
connection.close()
cookiestr='; '.join(cookiemas)
tmpdir = '/tmp/add_audio_vk'
songlist=[]
# this is first run, so lets write hash value
if not os.path.isdir(tmpdir):
mus = pycurl.Curl()
ans = StringIO.StringIO()
# let's figure out our pageid
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr)])
mus.setopt(pycurl.URL, 'https://vk.com/feed')
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0")
mus.perform()
mus.close()
data=ans.getvalue()
profile=re.search('<a href=\"/([^\"]+)\" onclick=\"return nav.go\(this, event, {noback: true}\)\" id=\"myprofile\" class=\"left_row\">',data)
pageid=profile.group(1)
# figure out our hash
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr)])
mus.setopt(pycurl.URL, 'https://vk.com/'+pageid)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0")
mus.perform()
mus.close()
data=ans.getvalue()
addhash=re.search('Page.audioStatusUpdate\(\'([^\']+)\'\)',data).group(1)
os.mkdir(tmpdir)
fwrite=open(tmpdir+'/addhash','w')
fwrite.write(addhash)
fwrite.close()
fread=open(tmpdir+'/addhash','r')
HASHSUM=fread.read()
fread.close()
# looking for first match
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.URL, 'https://m.vk.com/audio')
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr),'X-Requested-With: XMLHttpRequest'])
mus.setopt(pycurl.POST, 0)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.perform()
mus.close()
data=ans.getvalue()
js = json.loads(data)
if js[1]==False and js[4]==False:
sys.stderr.write('Firefox\'s profile is unauthorized at vk.com\n')
sys.exit(1)
page = js[5]
page1=page
page1 = re.sub(r'cur.au_search = new QuickSearch\(extend\(',r'',page1)
page1 = re.sub(r'\)\);extend\(cur,{module:\'audio\'}\);',r'',page1)
page1 = re.sub(r'\\/',r'/',page1)
page1 = re.sub(r'mp3\?([^"]+)',r'mp3',page1)
page1 = re.sub("(\n|\r).*", '', page1)
page1 = re.sub(',"_new":true\}, \{*','}',page1)
mlist = json.loads(page1)
count=0
for index, mas in mlist['_cache'].iteritems():
#mas[2] - link
#mas[3] - author
#mas[4] - song
songlist.append(dict([('link',mas[2]),('author',mas[3]),('song',mas[4])]))
count=count+1
##
offset=count
if count==200:
while (count>0):
count=0
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.URL, 'https://m.vk.com/audio')
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr),'X-Requested-With: XMLHttpRequest'])
req = '_ajax=1&offset=%d'%(offset)
mus.setopt(pycurl.POSTFIELDS, req)
mus.setopt(pycurl.POST, 1)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.perform()
mus.close()
data=ans.getvalue()
data = re.sub(r'\\/',r'/',data)
data = re.sub(r'mp3\?([^"]+)',r'mp3',data)
mlist = json.loads(data)
mlist=mlist[3][0]
if len(mlist)>0:
for index, mas in mlist.iteritems():
songlist.append(dict([('link',mas[2]),('author',mas[3]),('song',mas[4])]))
count=count+1
offset=offset+count
print "total count: %d"%(len(songlist))
shuffle(songlist)
mkremove = "if [ -e '%(path)s' ]; then rm -r '%(path)s'; fi; mkdir '%(path)s'" % {"path":PATH_TO_SAVE}
os.system(mkremove)
for i in range(count_of_songs):
print "%s - %s" %(songlist[i]['author'],songlist[i]['song'])
os.system("wget -P '%s' %s"%(PATH_TO_SAVE,songlist[i]['link']))
print "complete"
sys.exit(0)
| gpl-2.0 | 6,050,023,060,319,584,000 | 26.819767 | 142 | 0.672727 | false |
veveykocute/Spl | splc.py | 1 | 19239 | import sys
import math
"""A Shakespeare Compiler written in Python, splc.py
This is a compiler that implements the majority of the Shakespeare programming language
invented by Kalle Hasselstrom and Jon Aslund; I take no credit for inventing the language.
This software is free to edit or use, and though I doubt anyone would use this for many projects,
I guess I would appreciate some degree of acknowledgment if you do.
(c) V1.2 Sam Donow 2013-2014
[email protected]
[email protected]"""
#missing features
#full support for multi-word nouns/names
#Stacks, who needs them?
pos_adj = []
neg_adj = []
pos_comp = []
neg_comp = []
pos_nouns = []
neg_nouns = []
valid_names= []
zero_nouns = ['nothing', 'zero']
src = ""
N = 0
vartable = set([])
speaker = ""
target = ""
stage = set([])
actnum = 0
act_names = {}
scene_names= []
#report a compile-time error, then exit
def Assert(b, s):
global N
if not b:
sys.stderr.write(s + " at line " + str(N) + "\n")
sys.exit(1)
#Abstraction for writing to file, eased python 2/3 agnosticity,
#and will eventually allow file output instead of stdout if that
#ever is desired
def writeToFile(s):
sys.stdout.write(str(s) + "\n")
def isNoun(word):
return word in pos_nouns or word in neg_nouns or word in zero_nouns
def isAdjective(word):
return word in pos_adj or word in neg_adj
def isComparative(word):
return word in pos_comp or word in neg_comp
#returns 1 for "nice" and neutral nouns, -1 for nasty ones
def nounValue(word):
Assert(isNoun(word), "Tried to find the nounvalue of a non-noun")
return 1 if word in pos_nouns else -1 if word in neg_nouns else 0
#return s with all whitespace characters removed
def trimWhitespace(s):
trimmed = ""
for c in s:
if c not in ['\t', '\r', '\n', ' ']:
trimmed += c
return trimmed
#return s with all whitespace characters before the first non-whitespace character removed
def trimLeadingWhitespace(s):
trimIndex = 0
for c in s:
if c in ['\t', '\r', '\n', ' ']:
trimIndex +=1
else:
break
return s[trimIndex:]
#A whitespace-agnositic beginswith method
def beginsWithNoWhitespace(s, pattern):
return beginsWith(trimWhitespace(s), pattern)
def beginsWith(s, pattern):
return s[:len(pattern)] == pattern
def loadFileIntoList(filename, list):
f = open(filename, 'r')
for word in f.readlines():
list.append(word.split(" ")[-1][:-1])
f.close()
#load initial noun and adjective lists
def loadWordLists():
loadFileIntoList("include/neutral_adjective.wordlist" , pos_adj)
loadFileIntoList("include/positive_adjective.wordlist", pos_adj)
loadFileIntoList("include/negative_adjective.wordlist", neg_adj)
loadFileIntoList("include/positive_noun.wordlist", pos_nouns)
loadFileIntoList("include/neutral_noun.wordlist" , pos_nouns)
loadFileIntoList("include/negative_noun.wordlist", neg_nouns)
loadFileIntoList("include/positive_comparative.wordlist", pos_comp)
loadFileIntoList("include/positive_comparative.wordlist", neg_comp)
loadFileIntoList("include/character.wordlist", valid_names)
roman_values = { 'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1 }
def parseRomanNumeral(roman_string):
roman_string = roman_string.upper()
strindex = 0
roman_sum = 0
while strindex < len(roman_string) - 1:
if(roman_values[roman_string[strindex]] < roman_values[roman_string[strindex+1]]):
roman_sum -= roman_values[roman_string[strindex]]
else:
roman_sum += roman_values[roman_string[strindex]]
strindex += 1
return roman_sum + roman_values[roman_string[strindex]]
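# Worked example (added): parseRomanNumeral("XIV") returns 14 -- the "I" preceding
# "V" is subtracted while the remaining symbols are added.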
def isNumber(s):
words = s.split(" ")
for word in words:
if isNoun(word):
return True
return False
#parse a string that is supposed to evaluate to a number
#if failOk is set to true, will return 0 for phrases that do not evaluate to a number
def parseNum(s, failOk = False):
words = s.split(" ")
nounIndex = len(words)
for i in range(0,len(words)):
if isNoun(words[i]):
nounIndex = i
break
ok = nounIndex < len(words)
if not ok and failOk:
return 0
Assert (ok, str(words) + "\nExpected a number, but found no noun")
value = nounValue(words[nounIndex])
for word in words[:nounIndex]:
if isAdjective(word):
value *= 2
return value
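# Worked example (added; exact values depend on the bundled word lists):
# "a lovely golden king" evaluates to 1 * 2 * 2 = 4, since "king" is a positive
# noun worth 1 and each adjective in front of it doubles the value.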
def parseEnterOrExit():
global stage
endBracket = src[N].find(']')
Assert(endBracket >= 0, "[ without matching ]")
enterOrExit = src[N][src[N].find('[')+1:src[N].find(']')]
if beginsWithNoWhitespace(enterOrExit, "Enter"):
names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
for namestr in names:
name = namestr.split(" ")[-1]
Assert(name in vartable, "Undeclared actor entering a scene")
stage.add(name)
Assert(len(stage) < 3, "Too many actors on stage")
elif beginsWithNoWhitespace(enterOrExit, "Exit"):
names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
for namestr in names:
name = namestr.split(" ")[-1]
Assert(name in stage, "Trying to make an actor who is not in the scene exit")
stage.remove(name)
elif beginsWithNoWhitespace(enterOrExit, "Exeunt"):
stage = set([])
else:
Assert(False, "Bracketed clause without Enter, Exit, or Exeunt")
#returns the index of the leftmost punctuation mark in s
def findPunctuation(s):
valids = []
for val in [s.find('.'), s.find('!'), s.find('?')]:
if val >= 0:
valids.append(val)
return -1 if len(valids) == 0 else min(valids)
#returns an array of the punctuation-delimited statements at the current location in the parsing
def getStatements():
global N
statements = []
line = trimLeadingWhitespace(src[N])
unfinished = False
while line.find(':') < 0 and line.find('[') < 0:
punctuation = findPunctuation(line)
if punctuation < 0:
if unfinished == False:
statements.append(line[:-1])
else:
statements[-1] += line[:-1]
N += 1
line = src[N]
unfinished = True
elif punctuation > 0:
if not unfinished:
statements.append("")
statements[-1] += line[:punctuation]
line = line[punctuation + 1:]
unfinished = False
retval = []
for stat in statements:
if len(trimWhitespace(stat)) > 0:
retval.append(stat)
return retval
class Tree:
def __init__(self, v, l, r):
self.value = v
self.left = l
self.right = r
def wordToOperator(op):
if op == "sum":
return "+"
elif op == "difference":
return "-"
elif op == "quotient":
return "/"
elif op == "product":
return "*"
else:
Assert(False, "Illegal Operator")
binop = ["sum", "difference", "quotient", "product"]
unop = ["square", "cube", "twice"]
def buildExpressionTree(expr):
Assert (len(expr) > 0, "Ill-formed Expression in " + str(expr))
if expr[0] == "square":
if expr[1] == "root":
op = "(int)sqrt"
expr = expr[2:]
num, expr = buildExpressionTree(expr)
return Tree(op, num, ""), expr
elif expr[0] == "remainder":
if expr[1] == "of" and expr[2] == "the" and expr[3] == "quotient":
expr = expr[4:]
op = "%"
left, expr = buildExpressionTree(expr)
right, expr = buildExpressionTree(expr)
return Tree(op, left, right), expr
if expr[0] in binop:
op = wordToOperator(expr[0])
expr = expr[1:]
left, expr = buildExpressionTree(expr)
right, expr = buildExpressionTree(expr)
return Tree(op, left, right), expr
elif expr[0] in unop:
op = expr[0]
expr = expr[1:]
num, expr = buildExpressionTree(expr)
return Tree(op, num, ""), expr
if True:
i = 1 if expr[0] == "and" else 0
numstr = ""
while expr[i] not in binop and expr[i] not in unop and expr[i] not in ["and", "remainder"]:
if expr[i] in ["you", "thee", "yourself", "thyself", "thou"]:
expr = expr[i + 1:]
return Tree(target, "", ""), expr
elif expr[i] in ["me", "myself", "i"]:
expr = expr[i + 1:]
return Tree(speaker, "", ""), expr
elif expr[i].capitalize() in vartable:
name = expr[i]
expr = expr[i + 1:]
return Tree(name.capitalize(), "", ""), expr
elif i == len(expr) - 1:
numstr += expr[i]
i = len(expr)
break
else:
numstr += expr[i] + " "
i += 1
if i == len(expr):
expr = []
else:
expr = expr[i:]
if not isNumber(numstr):
return buildExpressionTree(expr)
else:
return Tree(str(parseNum(numstr)), "", ""), expr
def TreeToString(tree):
if tree.left == "":
#just a value
return str(tree.value)
elif tree.right == "":
#unary operator
return str(tree.value) + "(" + TreeToString(tree.left) + ")"
else:
#binary operator
return "(" + TreeToString(tree.left) + " " + str(tree.value) + " " + TreeToString(tree.right) + ")"
def parseExpr(expr):
tree = buildExpressionTree(expr.split(" "))[0]
return TreeToString(tree)
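# Worked example (added): parseExpr("the sum of a cat and a cat") yields the C
# expression "(1 + 1)", assuming "cat" appears in the noun word lists.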
def concatWords(wordArray):
c = ""
for word in wordArray:
c += word
return c
def firstWord(statement):
words = statement.split(" ")
for word in words:
if len(word) > 0:
return word
def parseStatement(stat):
statement = trimLeadingWhitespace(stat).lower()
first = statement.split(" ")[0]
trimmed = trimWhitespace(statement)
if first in ["you", "thou"]:
#this is an assignment of the form Prounoun [as adj as] expression
expr = ""
if statement.rfind("as") >= 0:
expr = statement[statement.rfind("as") + 3:]
else:
expr = statement[len(first) + 1:]
return target + " = " + parseExpr(expr) + " ;\n"
elif trimmed == "openyourheart" or trimmed == "openthyheart":
#numerical output
return 'fprintf(stdout, "%d", ' + target + ');\n'
elif trimmed == "speakyourmind" or trimmed == "speakthymind":
#character output
return 'fprintf(stdout, "%c", (char)' + target + ');\n'
elif trimmed == "listentoyourheart" or trimmed == "listentothyheart":
#numerical input
return 'fgets(inputbuffer, BUFSIZ, stdin);\nsscanf(inputbuffer, "%d", &' + target + ');\n' #" = getchar() - '0';\n"
elif trimmed == "openyourmind" or trimmed == "openyourmind":
#character input
return target + " = getchar();\n"
elif first in ["am", "are", "art", "be", "is"]:
#questions - do not yet support "not"
left = ""
kind = ""
right = ""
if statement.find("as") >= 0:
left, kind, right = statement.split(" as ")
Assert(isAdjective(kind), "Ill-formed conditional in " + statement)
kind = "equal"
elif statement.find("more") >= 0:
words = statement.split(" ")
moreloc = 0
for i in range(0, len(words)):
if words[i] == "more":
moreloc = i
break
Assert(isAdjective(words[moreloc + 1]), "Ill-formed conditional in " + statement)
kind = "greater" if words[moreloc + 1] in pos_adj else "lesser"
left, right = statement.split(" more " + words[moreloc + 1] + " ")
else:
comp = ""
for word in statement.split(" "):
if isComparative(word):
comp = word
break
Assert(len(comp) > 0, "Ill-formed conditional in " + statement)
kind = "greater" if comp in pos_comp else "lesser"
left, right = statement.split(comp)
return "condition = (" + parseExpr(left) + ") " + (">" if kind == "greater" else "<" if kind == "lesser" else "==") + " (" + parseExpr(right) + ");\n"
elif beginsWith(statement, "if so,"):
#positive condition
location = statement.find("if so,")
return "if (condition) {\n " + parseStatement(statement[location + 7:]) + " }\n"
elif beginsWith(statement, "if not,"):
#negative condition
location = statement.find("if not,")
return "if (!condition) {\n " + parseStatement(statement[location + 8:]) + " }\n"
elif beginsWith(statement, "let us") or beginsWith(statement, "we shall") or beginsWith(statement, "we must"):
words = statement.split(" ")
nextTwo = words[2] + " " + words[3]
Assert (nextTwo == "return to" or nextTwo == "proceed to", "Ill-formed goto")
# classic goto with scene or act
if words[4] == "scene" or words[4] == "act":
typeword = words[4] if words[4] == "act" else ("act_" + str(actnum) + "_scene")
return "goto " + typeword + str(parseRomanNumeral(words[5])) + ";\n"
else:
restOfPhrase = concatWords(words[4:])
type_ = "scene" if restOfPhrase in scene_names[actnum].keys() \
else "act" if restOfPhrase in act_names.keys() else "none"
Assert (type_ != "none", "Goto refers to nonexistant act or scene")
nameDict = act_names if type_ == "act" else scene_names[actnum]
            typeword = "act" if type_ == "act" else ("act_" + str(actnum) + "_scene")
return "goto " + typeword + str(nameDict[restOfPhrase]) + ";\n"
else:
return ""
def writeScenes(scenes, isLast):
writeToFile("act" + str(actnum) + ": {\ngoto act_" + str(actnum) + "_scene1;\n}")
for j in range(0, len(scenes)):
writeToFile("act_" + str(actnum) + "_scene" + str(j + 1) + ": {")
writeToFile(scenes[j])
if j < len(scenes) - 1:
writeToFile("goto act_" + str(actnum) + "_scene" + str(j + 2) + ";\n")
elif not isLast:
writeToFile("goto act" + str(actnum + 1) + ";\n")
writeToFile("}")
def handleDeclarations():
global N
global src
#variables, declaration syntax:
#Name, value
declarations = []
unfinished = False
while not beginsWithNoWhitespace(src[N], 'Act'):
Assert(N < len(src) - 1, "File contains no Acts")
if len(trimWhitespace(src[N])) > 0:
if not unfinished:
declarations.append(src[N])
else:
declarations[-1] += src[N]
unfinished = src[N].find('.') < 0
N += 1
for dec in declarations:
commaIndex = dec.find(',')
Assert(commaIndex > 0, "Improper declaration " + str(declarations))
wordsInName = trimLeadingWhitespace(dec[:commaIndex]).split(" ")
varname = wordsInName[-1]
value = parseNum(dec[commaIndex:-2], True)
writeToFile("int " + str(varname) + " = " + str(value) + ";")
Assert(varname in valid_names, "Non-Shakespearean variable name")
vartable.add(varname)
def getActOrSceneNumber(s, actOrScene):
num = s[s.find(actOrScene):].split(" ")[1]
if num.find(':') > 0:
num = num[:num.find(':')]
else:
Assert (False, "Bad " + actOrScene + " heading")
return parseRomanNumeral(num)
def getActOrSceneDescription(s):
desc = trimWhitespace(s[s.find(':')+1:]).lower()
p = findPunctuation(desc)
if p > 0:
desc = desc[:p]
return desc
# Gets all the names of scenes and acts, and adds them to the respective tables
# This must be done in a preprocessing step, in order to enable gotos to future acts/scenes
def parseAllActAndSceneDescriptions():
global scene_names
global act_names
current_act = 0
current_scene = 0
scene_names = [{}]
for line in src:
if beginsWithNoWhitespace(line, "Act"):
desc = getActOrSceneDescription(line)
current_act += 1
act_names[desc] = current_act
scene_names.append(dict())
current_scene = 0
elif beginsWithNoWhitespace(line, "Scene"):
desc = getActOrSceneDescription(line)
current_scene += 1
scene_names[current_act][desc] = current_scene
#-------------------------------Begin Main Program-------------------------#
Assert(len(sys.argv) > 1, "No input file")
filename = sys.argv[1]
f = open(filename, 'r')
src = f.readlines()
f.close()
loadWordLists()
#parse the title - all the text up until the first .
#title is unimportant and is thrown out
while src[N].find('.') < 0:
N += 1
N += 1
#title is thrown out
writeToFile("// " + filename + "\n" +
"// compiled with splc.py (c) Sam Donow 2013-2015\n" +
"#include <stdio.h>\n" +
"#include <math.h>\n" +
'#include "include/mathhelpers.h"\n' +
"int condition = 0;\n" +
"char inputbuffer[BUFSIZ];\n" +
"int main() {\n")
handleDeclarations()
parseAllActAndSceneDescriptions()
scenes = []
unfinished = False
while N < len(src):
if beginsWithNoWhitespace(src[N], 'Act'):
Assert (getActOrSceneNumber(src[N], 'Act') == actnum + 1, "Illegal Act numbering")
if actnum > 0:
writeScenes(scenes, False)
scenes = []
actnum += 1
#act_names[getActOrSceneDescription(src[N])] = actnum
N += 1
elif beginsWithNoWhitespace(src[N], 'Scene'):
Assert (getActOrSceneNumber(src[N], 'Scene') == len(scenes) + 1, "Illegal Scene numbering")
#scene_names[getActOrSceneDescription(src[N])] = len(scenes) + 1
N += 1
speaker = ""
target = ""
while (N < len(src)) and not (beginsWithNoWhitespace(src[N], 'Scene') or beginsWithNoWhitespace(src[N], 'Act')):
if beginsWithNoWhitespace(src[N], '['):
parseEnterOrExit()
if not unfinished:
scenes.append(";\n")
unfinished = True
N += 1
elif src[N].find(':') >= 0:
name = (src[N][:src[N].find(':')]).split(" ")[-1]
Assert (name in stage, "An actor who is not on stage is trying to speak")
for actor in stage:
if actor != name:
target = actor
speaker = name
N += 1
statements = getStatements()
scenecode = ""
for statement in statements:
scenecode += parseStatement(statement)
if not unfinished:
scenes.append(scenecode)
unfinished = True
else:
scenes[-1] += scenecode
else:
N += 1
unfinished = False
else:
N += 1
writeScenes(scenes, True)
writeToFile("}")
| unlicense | 5,124,951,847,631,947,000 | 34.561922 | 158 | 0.565466 | false |
IndyMPO/IndyGeoTools | ConvertGeography/GetAreaConversionMatrix.py | 1 | 3774 | #This script copyright 2017 Indianapolis Metropolitan Planning Organization
from __future__ import division
import arcpy
import os
import pandas as pd
import numpy as np
from subprocess import Popen
import sys
def clear_temp():
'''
Clears the temporary directory that is created when running this tool
'''
temp_dir = r'C:\TEMP'
for f in os.listdir(temp_dir): #Remove all files within the directory
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir) #Remove the directory itself
def main(*args):
#Read in inputs
from_shp_file = args[0]
from_field = args[1]
to_shp_file = args[2]
to_field = args[3]
outfile = args[4]
show_matrix = args[5]
remove_temp_if_successful = args[6]
remove_temp_if_error = args[7]
if from_field == to_field:
to_field += '_1'
    #Ensure the output file name ends in .csv; append the extension if it is missing.
if outfile[-4:] != '.csv':
outfile += '.csv'
#Create temporary directory
temp_dir = r'C:\TEMP'
os.mkdir(temp_dir)
temp_shp = os.path.join(temp_dir, 'TEMP.shp')
from_shp = os.path.join(temp_dir, 'FROM.shp')
to_shp = os.path.join(temp_dir, 'TO.shp')
#Copy input shapefiles into temporary directory
arcpy.CopyFeatures_management(from_shp_file, from_shp)
arcpy.CopyFeatures_management(to_shp_file, to_shp)
    #Process the data. If an error occurs, the temporary directory is deleted (when remove_temp_if_error is set) and then the exception is re-raised
try:
#Intersect the two shapefiles and calculate the area of the intersected shapefile
arcpy.Intersect_analysis([from_shp, to_shp], temp_shp)
temp2_shp = temp_shp.replace('.shp', '2.shp')
arcpy.CalculateAreas_stats(temp_shp, temp2_shp)
#Create a list of all of the origin and destination polygons
from_list = []
to_list = []
polygons = arcpy.da.SearchCursor(temp_shp, [from_field, to_field])
for polygon in polygons:
from_list += [polygon[0]]
to_list += [polygon[1]]
del polygons
from_codes = pd.Series(from_list).value_counts().index
to_codes = pd.Series(to_list).value_counts().index
#Create matrix with total area of each intersected polygon, arranged by the from polygon and to polygon
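        #(rows are indexed by the "to" field codes, columns by the "from" field codes)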
areas = pd.DataFrame(np.zeros((len(to_codes), len(from_codes))), index = to_codes, columns = from_codes)
polygons = arcpy.da.SearchCursor(temp2_shp, [from_field, to_field, 'F_AREA'])
for polygon in polygons:
areas.loc[polygon[1], polygon[0]] = polygon[2]
del polygons
#Divide each column of the matrix by its sum
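        #so each column sums to 1; entry (to, from) is then the share of the "from"
        #polygon's intersected area that lies within that "to" polygon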
total = areas.sum(0)
out_data = areas.copy()
for row in out_data.index:
out_data.loc[row] /= total
#Write to csv, and delete the temporary directory
out_data.to_csv(outfile)
if remove_temp_if_successful:
clear_temp()
except Exception as e:
if remove_temp_if_error:
clear_temp()
exc_type, exc_obj, exc_tb = sys.exc_info()
print (exc_tb.tb_lineno)
raise e
#Open the file if instructed to do so
if show_matrix:
Popen(outfile, shell = True)
if __name__ == '__main__':
from_shp_file = arcpy.GetParameterAsText(0)
from_field = arcpy.GetParameterAsText(1)
to_shp_file = arcpy.GetParameterAsText(2)
to_field = arcpy.GetParameterAsText(3)
outfile = arcpy.GetParameter(4)
show_matrix = arcpy.GetParameter(5)
remove_temp_if_successful = arcpy.GetParameter(6)
remove_temp_if_error = arcpy.GetParameter(7)
main(from_shp_file, from_field, to_shp_file, to_field, outfile, show_matrix, remove_temp_if_successful, remove_temp_if_error)
| apache-2.0 | -4,349,660,485,601,096,700 | 34.271028 | 129 | 0.642024 | false |
mission-peace/interview | python/dynamic/weighted_job_scheduling_max_profit.py | 1 | 1192 | """
Problem Statement
=================
Given set of jobs with start and end interval and profit, how to maximize profit such that jobs in subset do not
overlap.
Video
-----
* https://youtu.be/cr6Ip0J9izc
Complexity
----------
* Runtime Complexity: O(n^2)
* Space Complexity: O(n)
Reference Link
--------------
* http://www.cs.princeton.edu/courses/archive/spr05/cos423/lectures/06dynamic-programming.pdf
"""
def can_sequence(job1, job2):
_, job1_finish_time = job1
job2_start_time, _ = job2
return job1_finish_time <= job2_start_time
def find_max_profit(jobs):
sequenced_jobs = sorted(jobs.keys(), key=lambda x: x[1])
T = [jobs[job_key] for job_key in sequenced_jobs]
num_jobs = len(sequenced_jobs)
for j in range(1, num_jobs):
for i in range(0, j):
if can_sequence(sequenced_jobs[i], sequenced_jobs[j]):
T[j] = max(T[j], T[i] + jobs[sequenced_jobs[j]])
return max(T)
if __name__ == '__main__':
jobs = {
(1, 3): 5, # (start_time, end_time, total_cost)
(2, 5): 6,
(4, 6): 5,
(6, 7): 4,
(5, 8): 11,
(7, 9): 2
}
assert 17 == find_max_profit(jobs)
| apache-2.0 | -7,740,483,645,734,945,000 | 21.490566 | 112 | 0.568792 | false |
nthorne/xmppmote | configuration/test/test_commands.py | 1 | 7152 | #!/usr/bin/env python
#Copyright (C) 2012 Niklas Thorne.
#This file is part of XMPPMote.
#
#XMPPMote is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#XMPPMote is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with XMPPMote. If not, see <http://www.gnu.org/licenses/>.
""" This module provides unit tests for the commands module. """
import sys
import os
sys.path.append(os.path.abspath("../.."))
import mox
import unittest
from ConfigParser import SafeConfigParser
from ConfigParser import NoSectionError
from ConfigParser import NoOptionError
from configuration.commands import get_command_handler
from configuration.commands import UnknownHandler
from configuration.commands import restricted_set
from configuration.commands import MalformedCommand
from configuration.configurationparser import ConfigurationParser
from bot import commandhandlers
class GetCommandHandlerTest(mox.MoxTestBase):
""" Provides test cases for the get_command_handler function. """
def test_getting_existing_commandhandlers(self):
""" If any of the two known command handlers are configured, an instance
of the named command handler should be returned by get_command_handler
"""
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "get")
config = ConfigurationParser()
config.parse(mock_file)
        # case this one weirdly just to make sure that character casing is taken
# into consideration when parsing the string..
config.get("general", "handler").AndReturn("rEstrIctEd")
config.get("general", "handler").AndReturn("pAssthrU")
self.mox.ReplayAll()
expected_type = commandhandlers.RestrictedCommandHandler()
self.assertEquals(type(get_command_handler()),
type(expected_type))
expected_type = commandhandlers.UnsafeCommandHandler()
self.assertEquals(type(get_command_handler()),
type(expected_type))
def test_getting_nonexisting_commandhandler(self):
""" If the command handler returned by the configuration is unknown to
get_command_handler, an UnknownHandler exception should be raised. """
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "get")
config = ConfigurationParser()
config.parse(mock_file)
config.get("general", "handler").AndReturn("foobar")
self.mox.ReplayAll()
self.assertRaises(UnknownHandler, get_command_handler)
def test_getting_commandhandler_undefined_in_config(self):
""" If either the section or the option that details the command handler
is missing, an UnknownHandler exception should be raised. """
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "get")
config = ConfigurationParser()
config.parse(mock_file)
config.get("general", "handler").AndRaise(NoSectionError("general"))
config.get("general", "handler").AndRaise(NoOptionError("general",
"handler"))
self.mox.ReplayAll()
self.assertRaises(UnknownHandler, get_command_handler)
self.assertRaises(UnknownHandler, get_command_handler)
class GetRestrictedSetTest(mox.MoxTestBase):
""" Provides test cases for the restricted_set function. """
def test_getting_defined_restricted_set(self):
""" Make sure that properly formed commands are parsed into a list of
command tuples. """
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "has_section")
self.mox.StubOutWithMock(SafeConfigParser, "items")
config = ConfigurationParser()
config.parse(mock_file)
config.has_section("commands").AndReturn(True)
config.items("commands").AndReturn([
("foo", "ls::List files"),
("bar", "df:-h:Disk space usage (human readable)"),
("baz", "du:-sh .:"),
("foz", "pwd")
])
self.mox.ReplayAll()
self.assertEquals(restricted_set(), [
("ls", None, "List files"),
("df", ["-h"], "Disk space usage (human readable)"),
("du", ["-sh ."], ""),
("pwd", None, "")
])
def test_restricted_set_missing_section(self):
""" If there is no commands section in the configuration file, an empty
list should be returned. """
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "has_section")
self.mox.StubOutWithMock(SafeConfigParser, "items")
config = ConfigurationParser()
config.parse(mock_file)
config.has_section("commands").AndReturn(False)
self.mox.ReplayAll()
self.assertEquals(restricted_set(), [])
def test_restricted_set_undefined_set(self):
""" If there is a command section defined, but no commands in it, an
empty list should be returned. """
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "has_section")
self.mox.StubOutWithMock(SafeConfigParser, "items")
config = ConfigurationParser()
config.parse(mock_file)
config.has_section("commands").AndReturn(True)
config.items("commands").AndReturn([])
self.mox.ReplayAll()
self.assertEquals(restricted_set(), [])
def test_getting_malformed_restricted_set(self):
""" If there is a malformed command defined in the commands section, a
MalformedCommand should be raised. """
mock_file = self.mox.CreateMockAnything()
mock_file.closed = False
mock_file.name = "foobar"
self.mox.StubOutWithMock(SafeConfigParser, "has_section")
self.mox.StubOutWithMock(SafeConfigParser, "items")
config = ConfigurationParser()
config.parse(mock_file)
config.has_section("commands").AndReturn(True)
config.items("commands").AndReturn([("foo", "")])
self.mox.ReplayAll()
self.assertRaises(MalformedCommand, restricted_set)
if "__main__" == __name__:
unittest.main()
| gpl-3.0 | -3,692,022,302,636,005,000 | 31.807339 | 80 | 0.657019 | false |
YYWen0o0/python-frame-django | django/forms/fields.py | 1 | 47569 | """
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
TextInput, NumberInput, EmailInput, URLInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION
)
from django.utils import formats
from django.utils.encoding import smart_text, force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField'
)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False, label_suffix=None):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def _has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it w/ ''.
initial_value = initial if initial is not None else ''
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
except ValidationError:
return True
data_value = data if data is not None else ''
return initial_value != data_value
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
'max_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'),
'max_decimal_places': ungettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'),
'max_whole_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.error_messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.error_messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None
and whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.error_messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
return value
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
warnings.warn(
'Using SplitDateTimeWidget with DateTimeField is deprecated. '
'Use SplitDateTimeField instead.',
RemovedInDjango19Warning, stacklevel=2)
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if value[0] in self.empty_values and value[1] in self.empty_values:
return None
value = '%s %s' % tuple(value)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message is not None:
warnings.warn(
"The 'error_message' argument is deprecated. Use "
"Field.error_messages['invalid'] instead.",
RemovedInDjango20Warning, stacklevel=2
)
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in self.empty_value; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def _has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _("Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
f.content_type = Image.MIME[image.format]
except Exception:
# Pillow doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain malformed values).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def _has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms,
and for '1' and '0', which is what a RadioField will submit. Unlike
        the BooleanField, we need to explicitly check for True because we are
        not using the bool() function.
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None
def validate(self, value):
pass
def _has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple([x.__deepcopy__(memo) for x in self.fields])
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
if field._has_changed(field.to_python(initial), data):
return True
return False
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class IPAddressField(CharField):
default_validators = [validators.validate_ipv4_address]
def __init__(self, *args, **kwargs):
warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
RemovedInDjango19Warning)
super(IPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
return value.strip()
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def clean(self, value):
value = self.to_python(value).strip()
return super(SlugField, self).clean(value)
| bsd-3-clause | 3,579,217,377,039,823,400 | 38.087099 | 120 | 0.596712 | false |
evidation-health/ContinuousTimeMarkovModel | examples/small_sample_example_main.py | 1 | 6795 | import numpy as np
from theano.tensor import as_tensor_variable
from ContinuousTimeMarkovModel.distributions import *
from pymc3 import Model, sample, Metropolis, Dirichlet, Potential, Binomial, Beta, Slice, NUTS
import theano.tensor as TT
from ContinuousTimeMarkovModel.samplers.forwardS import *
from ContinuousTimeMarkovModel.samplers.forwardX import *
#import sys; sys.setrecursionlimit(50000)
#theano.config.compute_test_value = 'off'
# Load pre-generated data
from pickle import load
datadir = '../data/small_sample/'
infile = open(datadir+'pi.pkl','rb')
pi_start = load(infile)
infile.close()
infile = open(datadir+'Q.pkl','rb')
Q_start = load(infile)
infile.close()
infile = open(datadir+'S.pkl','rb')
S_start = load(infile)
infile.close()
infile = open(datadir+'B.pkl','rb')
B_start = load(infile)
infile.close()
infile = open(datadir+'B0.pkl','rb')
B0_start = load(infile)
infile.close()
infile = open(datadir+'X.pkl','rb')
X_start = load(infile)
infile.close()
infile = open(datadir+'Z.pkl','rb')
Z_start = load(infile)
infile.close()
infile = open(datadir+'L.pkl','rb')
L_start = load(infile)
infile.close()
infile = open(datadir+'obs_jumps.pkl','rb')
obs_jumps = load(infile)
infile.close()
infile = open(datadir+'T.pkl','rb')
T = load(infile)
infile.close()
infile = open(datadir+'O.pkl','rb')
O = load(infile)
infile.close()
#Cut down to 100 people
newN = 100
T = T[:newN]
nObs = T.sum()
S_start = S_start[0:nObs]
obs_jumps = obs_jumps[0:nObs]
X_start = X_start[0:nObs]
O = O[0:nObs]
nObs = S_start.shape[0]
N = T.shape[0] # Number of patients
M = pi_start.shape[0] # Number of hidden states
K = Z_start.shape[0] # Number of comorbidities
D = Z_start.shape[1] # Number of claims
Dd = 16 # Maximum number of claims that can occur at once
#import pdb; pdb.set_trace()
model = Model()
with model:
#Fails: #pi = Dirichlet('pi', a = as_tensor_variable([0.147026,0.102571,0.239819,0.188710,0.267137,0.054738]), shape=M, testval = np.ones(M)/float(M))
pi = Dirichlet('pi', a = as_tensor_variable(pi_start.copy()), shape=M)
pi_min_potential = Potential('pi_min_potential', TT.switch(TT.min(pi) < .001, -np.inf, 0))
Q = DiscreteObsMJP_unif_prior('Q', M=M, lower=0.0, upper=1.0, shape=(M,M))
#S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs), testval=np.ones(nObs,dtype='int32'))
S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs))
#B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
#B = Beta('B', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M))
B = Beta('B', alpha = 1., beta = 1., shape=(K,M))
#X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K), testval=np.ones((nObs,K),dtype='int8'))
X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K))
#Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D), testval=0.5*np.ones((K,D)))
#L = Beta('L', alpha = 1., beta = 1., shape=D, testval=0.5*np.ones(D))
Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D))
L = Beta('L', alpha = 1., beta = 1., shape=D)
O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, O_input=O, shape=(nObs,Dd), observed=O)
#O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, max_obs=max_obs, O_input=O, shape=(Dd,max_obs,N), observed=O)
#import pdb; pdb.set_trace()
from scipy.special import logit
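# The sampler start values must live on the transformed scales PyMC3 samples on
# (hence the *_logodds / Q_ratematrixoneway keys in `start` below), so the raw
# probabilities and rates are pushed through logit here.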
Q_raw = []
for i in range(Q_start.shape[0]-1):
Q_raw.append(Q_start[i,i+1])
Q_raw_log = logit(np.asarray(Q_raw))
B_lo = logit(B_start)
B0_lo = logit(B0_start)
Z_lo = logit(Z_start)
L_lo = logit(L_start)
start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo}
#teststart = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo, 'pi_stickbreaking':np.ones(M)/float(M)}
#start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_start}
with model:
#import pdb; pdb.set_trace()
steps = []
steps.append(NUTS(vars=[pi]))
#steps.append(NUTS(vars=[pi], scaling=np.ones(M-1)*0.058))
#steps.append(Metropolis(vars=[pi], scaling=0.058, tune=False))
steps.append(NUTS(vars=[Q],scaling=np.ones(M-1,dtype=float)*10.))
#steps.append(Metropolis(vars=[Q], scaling=0.2, tune=False))
steps.append(ForwardS(vars=[S], nObs=nObs, T=T, N=N, observed_jumps=obs_jumps))
steps.append(NUTS(vars=[B0,B]))
#steps.append(Metropolis(vars=[B0], scaling=0.2, tune=False))
#steps.append(NUTS(vars=[B]))
#steps.append(Metropolis(vars=[B], scaling=0.198, tune=False))
steps.append(ForwardX(vars=[X], N=N, T=T, K=K, D=D,Dd=Dd, O=O, nObs=nObs))
steps.append(NUTS(vars=[Z], scaling=np.ones(K*D)))
#steps.append(Metropolis(vars=[Z], scaling=0.0132, tune=False))
steps.append(NUTS(vars=[L],scaling=np.ones(D)))
#steps.append(Metropolis(vars=[L],scaling=0.02, tune=False, ))
## 22 minutes per step with all NUTS set
#import pdb; pdb.set_trace()
#model.dlogp()
trace = sample(1001, steps, start=start, random_seed=111,progressbar=True)
#trace = sample(11, steps, start=start, random_seed=111,progressbar=True)
#trace = sample(11, steps, start=start, random_seed=[111,112,113],progressbar=False,njobs=3)
pi = trace[pi]
Q = trace[Q]
S = trace[S]
#S0 = S[:,0] #now pibar
B0 = trace[B0]
B = trace[B]
X = trace[X]
Z = trace[Z]
L = trace[L]
Sbin = np.vstack([np.bincount(S[i],minlength=6)/float(len(S[i])) for i in range(len(S))])
zeroIndices = np.roll(T.cumsum(),1)
zeroIndices[0] = 0
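# zeroIndices[i] is the offset of patient i's first observation in the flattened
# per-observation arrays (S, X, ...); zeroIndices - 1 below therefore picks out
# each patient's final observation.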
pibar = np.vstack([np.bincount(S[i][zeroIndices],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])
pibar = np.vstack([np.bincount(S_start[zeroIndices],minlength=M)/float(zeroIndices.shape[0]),pibar])
SEnd = np.vstack([np.bincount(S[i][zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])
SEnd = np.vstack([np.bincount(S_start[zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]),SEnd])
logp = steps[2].logp
Xlogp = steps[4].logp
XChanges = np.insert(1-(1-(X[:,1:]-X[:,:-1])).prod(axis=2),0,0,axis=1)
XChanges.T[zeroIndices] = 0
XChanges[XChanges.nonzero()] = XChanges[XChanges.nonzero()]/XChanges[XChanges.nonzero()]
XChanges = XChanges.sum(axis=1)/float(N)
logpTotal = [model.logp(trace[i]) for i in range(len(trace))]
#np.set_printoptions(2);np.set_printoptions(linewidth=160)
'''
for i in range(1001):
print "~~~",i ,"~~~"
print pi[i,:]
print "Bincount S0:", np.bincount(S0[i,:],minlength=6)
print "\n"
'''
#from pickle import dump
#with open('file.pkl','wb') as file:
# dump(trace,file)
| mit | -8,107,003,969,611,124,000 | 38.051724 | 186 | 0.652686 | false |
nbeck90/data_structures_2 | test_insert_sort.py | 1 | 1280 | # -*- coding: utf-8 -*-
import pytest
from insert_sort import insert_sort
def test_sorted():
my_list = list(range(100))
insert_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
insert_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
insert_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
insert_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
insert_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
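    # Python 2 specific: with the utf-8 coding header the literal 'π' is the byte
    # string '\xcf\x80', which is what the assertion below expects.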
my_list = ['π']
insert_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
insert_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
def test_combo():
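    # Relies on Python 2 comparison rules, where any int sorts before any str.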
my_list = [42, 1, 'a', 500]
insert_sort(my_list)
assert my_list == [1, 42, 500, 'a']
my_list = [42, '1', 'a', '500']
insert_sort(my_list)
assert my_list == [42, '1', '500', 'a']
def test_function():
my_list = []
new_list = [insert_sort(my_list)]
assert new_list == [None]
def test_non_iterable():
with pytest.raises(TypeError):
insert_sort(42)
| mit | 6,873,774,539,873,573,000 | 19.629032 | 47 | 0.530884 | false |
Wyliodrin/wyliodrin-server | tests/debugging/sim_board.py | 1 | 3409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Board
"""
import getpass
import logging
import os
import signal
import sleekxmpp
import ssl
import sys
import threading
import time
from sleekxmpp import Message, Presence
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
JID = "[email protected]"
PASS = "wyliodrin"
MESSAGE = None
gdb_commands_pipe_name = "/tmp/gdb_commands"
gdb_results_pipe_name = "/tmp/gdb_results"
class W(ElementBase):
"""
<w xmlns="wyliodrin" d="<msgpack_data>"/>
"""
name = 'w'
namespace = 'wyliodrin'
plugin_attrib = 'w'
interfaces = set(('d',))
class SimBoard(sleekxmpp.ClientXMPP):
def __init__(self, jid, password, pipeout):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.pipeout = pipeout
self.add_event_handler("session_start", self.start, threaded=False)
self.register_handler(
Callback('Some custom message',
StanzaPath('message/w'),
self._handle_action))
self.add_event_handler('custom_action',
self._handle_action_event,
threaded=True)
register_stanza_plugin(Message, W)
def start(self, event):
global MESSAGE
# Send priority
prio = self.Presence()
prio['lang'] = None
prio['to'] = None
prio['priority'] = '50'
prio.send()
# Save message
MESSAGE = self.Message()
MESSAGE['lang'] = None
MESSAGE['to'] = "[email protected]"
def _handle_action(self, msg):
self.event('custom_action', msg)
def _handle_action_event(self, msg):
self.pipeout.write(msg['w']['d'])
self.pipeout.flush()
class Listener(threading.Thread):
def __init__(self, pipein):
threading.Thread.__init__(self)
self.pipein = pipein
def run(self):
global MESSAGE
while True:
# Get result
content = os.read(self.pipein.fileno(), 3 * 1024).decode("utf-8")
MESSAGE['w']['d'] = content
MESSAGE.send()
if __name__ == '__main__':
# Create the commands and results pipes
if not os.path.exists(gdb_commands_pipe_name):
os.mkfifo(gdb_commands_pipe_name)
if not os.path.exists(gdb_results_pipe_name):
os.mkfifo(gdb_results_pipe_name)
# Open pipes
gdb_commands_pipe_fd = open(gdb_commands_pipe_name, 'w')
gdb_results_pipe_fd = open(gdb_results_pipe_name, 'r')
listener = Listener(gdb_results_pipe_fd)
listener.start()
# Setup logging.
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)-8s %(message)s')
xmpp = SimBoard(JID, PASS, gdb_commands_pipe_fd)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0199') # XMPP Ping
xmpp.ssl_version = ssl.PROTOCOL_SSLv3
xmpp.auto_authorize = True
xmpp.auto_subscribe = True
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
| lgpl-3.0 | -6,279,385,483,405,430,000 | 21.136364 | 71 | 0.675858 | false |
KirillMysnik/ArcJail | srcds/addons/source-python/plugins/arcjail/modules/games/base_classes/map_game_team_based.py | 1 | 16195 | # This file is part of ArcJail.
#
# ArcJail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ArcJail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ArcJail. If not, see <http://www.gnu.org/licenses/>.
from random import shuffle
from core import GAME_NAME
from ....internal_events import InternalEvent
from ....resource.strings import COLOR_SCHEME
from ...jail_map import teleport_player, get_games
from ...player_colors import cancel_color_request, make_color_request
from ...players import broadcast, player_manager, tell
from ...rebels import get_rebels
from ...skins import model_player_manager
from .. import (
config_manager, game_event_handler, game_internal_event_handler,
helper_set_winner, helper_set_loser, MIN_PLAYERS_IN_GAME, stage,
strings_module)
from .map_game import MapGame
COLOR_PRIORITY = 2
SKIN_PRIORITY = 2
TEAM_NUM_MIN = 2
TEAM_NUM_MAX = 4
class MapGameTeamBased(MapGame):
class PlayerTeam(list):
def __init__(self, team_num, *args, **kwargs):
super().__init__(*args, **kwargs)
self.team_num = team_num
@property
def team_num_str(self):
return 'team{}'.format(self.team_num)
_caption = '$games_base title_mapgame_teambased'
num_teams = 2
stage_groups = {
'mapgame-prepare': [
"mapgame-cancel-falldmg-protection",
"mapgame-equip-noblock",
"mapgame-teambased-split-teams",
"mapgame-teleport-players",
"mapgame-fire-mapdata-prepare-outputs",
"mapgame-prepare-entry",
],
'mapgame-teambased-split-teams': [
"mapgame-teambased-split-teams",
],
'mapgame-teambased-split-teams2': [
"mapgame-teambased-split-teams2",
],
'mapgame-teambased-split-teams3': [
"mapgame-teambased-split-teams3",
],
'mapgame-teambased-split-teams4': [
"mapgame-teambased-split-teams4",
],
'mapgame-teleport-players2': ["mapgame-teleport-players2", ],
'mapgame-teleport-players3': ["mapgame-teleport-players3", ],
'mapgame-teleport-players4': ["mapgame-teleport-players4", ],
'game-end-draw': ['game-end-draw', ],
'game-end-win-team1': ['game-end-win-team1', ],
'game-end-win-team2': ['game-end-win-team2', ],
'game-end-win-team3': ['game-end-win-team3', ],
'game-end-win-team4': ['game-end-win-team4', ],
}
def __init__(self, leader_player, players, **kwargs):
super().__init__(leader_player, players, **kwargs)
assert TEAM_NUM_MIN <= self.num_teams <= TEAM_NUM_MAX
self._starting_player_number = len(players)
self._team1 = self.PlayerTeam(1)
self._team2 = self.PlayerTeam(2)
self._team3 = self.PlayerTeam(3)
self._team4 = self.PlayerTeam(4)
@property
def team1(self):
return tuple(self._team1)
@property
def team2(self):
return tuple(self._team2)
@property
def team3(self):
return tuple(self._team3)
@property
def team4(self):
return tuple(self._team4)
def get_player_team(self, player):
if player in self._team1:
return self._team1
if player in self._team2:
return self._team2
if player in self._team3:
return self._team3
if player in self._team4:
return self._team4
raise IndexError("Player does not belong to this game")
@stage('mapgame-teleport-players')
def stage_mapgame_teleport_players(self):
self.insert_stage_group(
"mapgame-teleport-players{}".format(self.num_teams))
@stage('mapgame-teleport-players2')
def stage_mapgame_teleport_players2(self):
spawnpoints = list(self.map_data.get_spawnpoints('team1'))
shuffle(spawnpoints)
for player in self._team1:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team2'))
shuffle(spawnpoints)
for player in self._team2:
teleport_player(player, spawnpoints.pop())
teleport_player(self.leader, self.map_data.get_spawnpoints('team0')[0])
@stage('mapgame-teleport-players3')
def stage_mapgame_teleport_players3(self):
spawnpoints = list(self.map_data.get_spawnpoints('team1'))
shuffle(spawnpoints)
for player in self._team1:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team2'))
shuffle(spawnpoints)
for player in self._team2:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team3'))
shuffle(spawnpoints)
for player in self._team3:
teleport_player(player, spawnpoints.pop())
teleport_player(self.leader, self.map_data.get_spawnpoints('team0')[0])
@stage('mapgame-teleport-players4')
def stage_mapgame_teleport_players4(self):
spawnpoints = list(self.map_data.get_spawnpoints('team1'))
shuffle(spawnpoints)
for player in self._team1:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team2'))
shuffle(spawnpoints)
for player in self._team2:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team3'))
shuffle(spawnpoints)
for player in self._team3:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team4'))
shuffle(spawnpoints)
for player in self._team4:
teleport_player(player, spawnpoints.pop())
teleport_player(self.leader, self.map_data.get_spawnpoints('team0')[0])
@stage('mapgame-teambased-split-teams')
def stage_mapgame_teambased_split_teams(self):
self.insert_stage_group(
"mapgame-teambased-split-teams{}".format(self.num_teams))
@stage('undo-mapgame-teambased-split-teams')
def stage_undo_mapgame_teambased_split_teams(self):
for player in self._players:
model_player_manager[player.index].cancel_request(
'games-teambased')
cancel_color_request(player, 'games-teambased')
@stage('mapgame-teambased-split-teams2')
def stage_mapgame_teambased_split_teams_2(self):
self._team1 = self.PlayerTeam(1)
self._team2 = self.PlayerTeam(2)
players = self._players[:]
shuffle(players)
if GAME_NAME in ("csgo",):
broadcast(strings_module['players_two_teams'].tokenize(
color1=COLOR_SCHEME['color_highlight'],
color2=COLOR_SCHEME['color_highlight'],
team1=strings_module['team1'],
team2=strings_module['team2'],
))
else:
broadcast(strings_module['players_two_teams'].tokenize(
color1=config_manager['team1_color'],
color2=config_manager['team2_color'],
team1=strings_module['team1'],
team2=strings_module['team2'],
))
while players:
p1, p2 = players.pop(), players.pop()
if GAME_NAME in ("csgo",):
tell(p1, strings_module['your_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team1']
))
tell(p2, strings_module['your_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team2']
))
else:
tell(p1, strings_module['your_team'].tokenize(
color=config_manager['team1_color'],
team=strings_module['team1']
))
tell(p2, strings_module['your_team'].tokenize(
color=config_manager['team2_color'],
team=strings_module['team2']
))
if config_manager['prefer_model_over_color']:
model_player_manager[p1.index].make_request(
'games-teambased', SKIN_PRIORITY, "alpha")
model_player_manager[p2.index].make_request(
'games-teambased', SKIN_PRIORITY, "bravo")
else:
make_color_request(
p1, COLOR_PRIORITY, 'games-teambased',
config_manager['team1_color']
)
make_color_request(
p2, COLOR_PRIORITY, 'games-teambased',
config_manager['team2_color']
)
self._team1.append(p1)
self._team2.append(p2)
@stage('mapgame-teambased-split-teams3')
def stage_mapgame_teambased_split_teams_3(self):
raise NotImplementedError
@stage('mapgame-teambased-split-teams4')
def stage_mapgame_teambased_split_teams_4(self):
raise NotImplementedError
@stage('game-end-draw')
def stage_game_end_draw(self):
broadcast(strings_module['draw'])
self.set_stage_group('destroy')
@stage('game-end-win-team1')
def stage_game_end_win_team1(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team1,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=1,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team1']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team1_color'],
team=strings_module['team1']
))
for player in self._team1:
helper_set_winner(player)
for player in (self._team2 + self._team3 + self._team4):
helper_set_loser(player)
self.set_stage_group('destroy')
@stage('game-end-win-team2')
def stage_game_end_win_team2(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team2,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=2,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team2']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team2_color'],
team=strings_module['team2']
))
for player in self._team2:
helper_set_winner(player)
for player in (self._team1 + self._team3 + self._team4):
helper_set_loser(player)
self.set_stage_group('destroy')
@stage('game-end-win-team3')
def stage_game_end_win_team3(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team3,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=3,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team3']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team3_color'],
team=strings_module['team3']
))
for player in self._team3:
helper_set_winner(player)
for player in (self._team1 + self._team2 + self._team4):
helper_set_loser(player)
self.set_stage_group('destroy')
@stage('game-end-win-team4')
def stage_game_end_win_team4(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team4,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=4,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team4']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team4_color'],
team=strings_module['team4']
))
for player in self._team4:
helper_set_winner(player)
for player in (self._team1 + self._team2 + self._team3):
helper_set_loser(player)
self.set_stage_group('destroy')
@game_event_handler('jailgame-player-death', 'player_death')
def event_jailgame_player_death(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if self.leader == player:
self.set_stage_group('abort-leader-dead')
elif player in self._players:
self._players.remove(player)
self.get_player_team(player).remove(player)
if not all((self._team1,
self._team2,
self._team3,
self._team4)[:self.num_teams]):
self.set_stage_group('abort-not-enough-players')
elif len(self._players) < MIN_PLAYERS_IN_GAME:
self.set_stage_group('abort-not-enough-players')
@game_internal_event_handler(
'jailgame-main-player-deleted', 'player_deleted')
def event_jailgame_player_deleted(self, player):
if self.leader == player:
self.set_stage_group('abort-leader-disconnect')
elif player in self._players:
self._players.remove(player)
for team_list in (self._team1,
self._team2,
self._team3,
self._team4):
if player in team_list:
team_list.remove(player)
            if not all((self._team1,
                        self._team2,
                        self._team3,
                        self._team4)[:self.num_teams]):
self.set_stage_group('abort-not-enough-players')
elif len(self._players) < MIN_PLAYERS_IN_GAME:
self.set_stage_group('abort-not-enough-players')
@classmethod
def get_available_launchers(cls, leader_player, players):
if get_rebels():
return ()
len_players = len(players)
if len_players < config_manager['min_players_number']:
return ()
if len_players % cls.num_teams:
return ()
result = []
teams = ['team1', 'team2', 'team3', 'team4'][:cls.num_teams]
for map_data in get_games(cls.module):
p_min = map_data['MIN_PLAYERS']
p_max = map_data['MAX_PLAYERS']
if not len(map_data.get_spawnpoints('team0')):
continue
for team_num in teams:
if (len(map_data.get_spawnpoints(team_num)) <
len_players // cls.num_teams):
break
else:
if (len_players >= p_min and
(p_max == -1 or len_players <= p_max)):
result.append(cls.GameLauncher(cls, map_data))
return result
| gpl-3.0 | -3,576,265,509,460,147,000 | 32.25462 | 79 | 0.569312 | false |
mooseman/pdteco | test_parser.py | 1 | 1865 |
# test_parser.py
# Try a few things with creating tokens which know the
# kind of token that should follow them.
import string, itertools
class token(object):
def __init__(self):
self.type = self.next = self.stmttype = None
self.attrdict = vars(self)
# Set an attribute
    # NOTE! This can also be used to store values to be passed
# to the next token.
def set(self, attr, val):
setattr(self, attr, val)
# Get an attribute from a token.
def get(self, attr):
return getattr(self, attr)
def display(self):
print self.attrdict
# Test the code
a = token()
a.set('type', 'foo')
a.set('next', 'bar')
a.set('moose', 'big')
print a.get('next')
a.display()
# Create a parser with two modes - character and word.
# Note - we could add a statement checker to this. It would look at the
# stmttype of tokens to determine which kind of statement they belong in.
# When a statement is complete, it can flag that and act accordingly.
# Also - attach actions to statements.
class parser(object):
def __init__(self):
self.toklist = []
self.mode = None
def setmode(self, mode):
self.mode = mode
# Clear the token list
def clear(self):
self.toklist = []
def parse(self, stuff, sep=" "):
if self.mode == 'char':
for ch in stuff:
self.toklist.append(ch)
elif self.mode == 'word':
for tok in stuff.split(sep):
self.toklist.append(tok)
def display(self):
print self.toklist
# Test the code
a = parser()
a.setmode('char')
a.parse('The quick brown fox')
a.display()
a.setmode('word')
a.clear()
a.parse('The quick brown fox')
a.display()
| unlicense | 6,974,440,820,604,117,000 | 21.46988 | 73 | 0.574263 | false |
jtoppins/beaker | Client/src/bkr/client/task_watcher.py | 1 | 5411 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
import time
__all__ = (
"TaskWatcher",
"watch_tasks"
)
def display_tasklist_status(task_list):
state_dict = {}
for task in task_list:
for state, value in task.get_state_dict().iteritems():
state_dict.setdefault(state, 0)
state_dict[state] += value
print "--> " + " ".join(( "%s: %s" % (key, state_dict[key]) for key in sorted(state_dict) )) + " [total: %s]" % sum(state_dict.values())
def watch_tasks(hub, task_id_list, indentation_level=0, sleep_time=30, task_url=None):
"""Watch the task statuses until they finish."""
if not task_id_list:
return
try:
print "Watching tasks (this may be safely interrupted)..."
watcher = TaskWatcher()
for task_id in sorted(task_id_list):
watcher.task_list.append(Task(hub, task_id, indentation_level))
# print task url if task_url is set or TASK_URL exists in config file
task_url = task_url or hub._conf.get("TASK_URL", None)
if task_url is not None:
print "Task url: %s" % (task_url % task_id)
is_failed = False
while True:
all_done = True
changed = False
for task in watcher.task_list:
changed |= watcher.update(task)
is_failed |= watcher.is_failed(task)
all_done &= watcher.is_finished(task)
if changed:
display_tasklist_status(watcher.task_list)
if all_done:
break
time.sleep(sleep_time)
except KeyboardInterrupt:
running_task_list = [ t.task_id for t in watcher.task_list if not watcher.is_finished(t) ]
if running_task_list:
print "Tasks still running: %s" % running_task_list
# Don't report pass on jobs still running.
is_failed = True
return is_failed
class TaskWatcher(object):
display_tasklist_status = staticmethod(display_tasklist_status)
def __init__(self):
self.subtask_dict = {}
self.task_list = []
def is_finished(self, task):
"""Is the task finished?"""
if task.task_info is None:
return False
result = task.task_info.get("is_finished", False)
for subtask in self.subtask_dict.itervalues():
result &= subtask.is_finished()
return result
def is_failed(self, task):
"""Did the task Fail?"""
if task.task_info is None:
return False
result = task.task_info.get("is_failed", False)
for subtask in self.subtask_dict.itervalues():
result |= subtask.is_failed()
return result
def update(self, task):
"""Update info and log if needed. Returns True on state change."""
if self.is_finished(task):
return False
last = task.task_info
task.task_info = task.hub.taskactions.task_info(task.task_id, False)
if task.task_info is None:
print "No such task id: %s" % task.task_id
sys.exit(1)
changed = False
state = task.task_info["state"]
if last:
# compare and note status changes
laststate = last["state"]
if laststate != state:
print "%s: %s -> %s" % (task, task.display_state(last), task.display_state(task.task_info))
changed = True
else:
# first time we're seeing this task, so just show the current state
print "%s: %s" % (task, task.display_state(task.task_info))
changed = True
# update all subtasks
for key in sorted(self.subtask_dict.keys()):
changed |= self.subtask_dict[key].update()
return changed
class Task(object):
def __init__(self, hub, task_id, indentation_level=0):
self.hub = hub
self.task_id = task_id
self.task_info = None
self.indentation_level = int(indentation_level)
self.subtask_dict = {}
def __str__(self):
result = "%s%s" % (" " * self.indentation_level, self.task_id)
if self.task_info:
result += " %s" % self.task_info.get("method", "unknown")
return result
def is_failed(self):
"""Did the task fail?"""
if self.task_info is None:
return False
return self.task_info.get("is_failed", False)
def display_state(self, task_info):
worker = task_info.get("worker")
if worker is not None:
return "%s (%s)" % (task_info["state_label"], worker["name"])
return "%s" % task_info["state_label"]
def get_state_dict(self):
state_dict = {}
if self.task_info is not None:
state = self.task_info.get("state_label", "unknown")
state_dict.setdefault(state, 0)
state_dict[state] += 1
for subtask in self.subtask_dict.itervalues():
for state, value in subtask.get_state_dict().iteritems():
state_dict.setdefault(state, 0)
state_dict[state] += value
return state_dict
| gpl-2.0 | -5,898,918,360,989,663,000 | 32.196319 | 140 | 0.56958 | false |
cordmata/mediaampy | mediaamp/services.py | 1 | 10270 | from functools import partial
import re
from .exceptions import ServiceNotAvailable
services = {}
def register(cls):
if issubclass(cls, BaseService):
key = getattr(cls, 'registry_key', None)
if not key:
key = ' '.join(re.findall('[A-Z][^A-Z]*', cls.__name__))
services[key] = cls
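# Illustrative note: the regex above turns a camel-cased class name into the
# registry key, e.g. 'MediaDataService' -> 'Media Data Service', unless the
# class sets `registry_key` explicitly (as FeedReaderDataService does below).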
class Endpoint(object):
def __init__(self, path=None, name=None, service=None, **kwargs):
self.path = path
self.name = name
self.service = service
self.default_params = kwargs.copy()
self.default_params.setdefault('schema', '1.0')
def urljoin(self, *args):
parts = (self.service.base_url, self.path, self.name) + args
return '/'.join([
part.lstrip('/') for part in parts if part is not None
]).rstrip('/')
def get(self, extra_path=None, **kwargs):
return self._make_request('get', extra_path, **kwargs)
def put(self, extra_path=None, **kwargs):
return self._make_request('put', extra_path, **kwargs)
def post(self, extra_path=None, **kwargs):
return self._make_request('post', extra_path, **kwargs)
def delete(self, extra_path=None, **kwargs):
return self._make_request('delete', extra_path, **kwargs)
def _make_request(self, method, extra_path=None, **kwargs):
# merge default parameters with those supplied
params = dict(self.default_params, **kwargs.pop('params', {}))
url = self.urljoin(extra_path)
return self.service.session.request_json(method, url, params=params, **kwargs)
def __call__(self, **kwargs):
""" Override default URL parameters.
Allow custom overrides of defaults to look like object
initialization.
"""
self.default_params.update(kwargs)
return self
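# Illustrative usage sketch (the names below are examples, not a documented
# API surface): a service is built from an authenticated session plus a base
# URL, after which endpoints are attributes whose default params (schema,
# account) are merged into each request, e.g.
#   svc = MediaDataService(session, base_url)
#   media_feed = svc.Media.get()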
class BaseService(object):
def __init__(self, session, base_url):
self.session = session
self.base_url = base_url
self.init_endpoints()
def init_endpoints(self):
for k, v in self.__class__.__dict__.items():
if isinstance(v, Endpoint):
v.service = self
v(account=self.session.account)
if v.name is None:
v.name = k
@property
def Notifications(self):
return Endpoint(name='notify', service=self, account=self.session.account)
DataEndpoint = partial(Endpoint, path='data')
BusinessEndpoint = partial(Endpoint, path='web')
@register
class AccessDataService(BaseService):
Permission = DataEndpoint()
Role = DataEndpoint()
Authorization = BusinessEndpoint()
Lookup = BusinessEndpoint()
Registry = BusinessEndpoint()
@register
class AccountDataService(BaseService):
Account = DataEndpoint()
@register
class CommerceConfigurationDataService(BaseService):
CommerceRegistry = DataEndpoint()
CheckoutConfiguration = DataEndpoint()
FulfillmentConfiguration = DataEndpoint()
PaymentConfiguration = DataEndpoint()
Rule = DataEndpoint()
RuleSet = DataEndpoint()
TaxConfiguration = DataEndpoint()
@register
class CommerceEventDataService(BaseService):
OrderTracking = DataEndpoint()
@register
class CuePointDataService(BaseService):
CuePoint = Endpoint()
CuePointType = Endpoint()
@register
class DeliveryDataService(BaseService):
AccountSettings = DataEndpoint()
AdPolicy = DataEndpoint()
Restriction = DataEndpoint()
UserAgent = DataEndpoint()
@register
class EndUserDataService(BaseService):
Directory = DataEndpoint()
Security = DataEndpoint()
User = DataEndpoint()
Authentication = BusinessEndpoint()
Lookup = BusinessEndpoint()
Self = BusinessEndpoint()
@register
class EntertainmentDataService(BaseService):
AccountSettings = DataEndpoint()
Channel = DataEndpoint()
ChannelSchedule = DataEndpoint()
Company = DataEndpoint()
Credit = DataEndpoint()
Listing = DataEndpoint()
Location = DataEndpoint()
Person = DataEndpoint()
Program = DataEndpoint()
ProgramAvailability = DataEndpoint()
Station = DataEndpoint()
Tag = DataEndpoint()
TvSeason = DataEndpoint()
@register
class EntertainmentIngestDataService(BaseService):
IngestConfig = DataEndpoint()
IngestResult = DataEndpoint()
Process = BusinessEndpoint()
@register
class EntertainmentFeedsService(BaseService):
Feed = Endpoint(name='')
@register
class EntitlementDataService(BaseService):
AccountSettings = DataEndpoint()
Adapter = DataEndpoint()
AdapterConfiguration = DataEndpoint()
Device = DataEndpoint()
DistributionRight = DataEndpoint()
DistributionRightLicenseCount = DataEndpoint()
Entitlement = DataEndpoint()
LicenseCount = DataEndpoint()
PhysicalDevice = DataEndpoint()
ProductDevice = DataEndpoint()
Rights = DataEndpoint()
SubjectPolicy = DataEndpoint()
UserDevice = DataEndpoint()
@register
class EntitlementWebService(BaseService):
Entitlements = BusinessEndpoint()
RegisterDevice = BusinessEndpoint()
@register
class EntitlementLicenseService(BaseService):
ContentAccessRules = BusinessEndpoint(schema='1.2')
License = BusinessEndpoint(schema='2.5')
@register
class FeedReaderDataService(BaseService):
registry_key = 'FeedReader Data Service'
FeedRecord = DataEndpoint()
@register
class FeedsDataService(BaseService):
FeedAdapter = DataEndpoint()
FeedConfig = DataEndpoint()
@register
class FeedsService(BaseService):
Feed = Endpoint(name='')
@register
class FileManagementService(BaseService):
FileManagement = BusinessEndpoint()
@register
class IngestDataService(BaseService):
Adapter = DataEndpoint()
AdapterConfiguration = DataEndpoint()
Checksum = DataEndpoint()
@register
class IngestService(BaseService):
ingest = Endpoint()
test = Endpoint()
@register
class KeyDataService(BaseService):
Key = DataEndpoint()
UserKey = DataEndpoint()
@register
class LedgerDataService(BaseService):
    LedgerEntry = DataEndpoint()
@register
class LiveEventDataService(BaseService):
LiveEncoder = DataEndpoint()
LiveEvent = DataEndpoint()
@register
class LiveEventService(BaseService):
Scheduling = BusinessEndpoint()
@register
class MediaDataService(BaseService):
AccountSettings = DataEndpoint()
AssetType = DataEndpoint()
Category = DataEndpoint()
Media = DataEndpoint()
MediaDefaults = DataEndpoint()
MediaFile = DataEndpoint()
Provider = DataEndpoint()
Release = DataEndpoint()
Server = DataEndpoint()
@register
class MessageDataService(BaseService):
EmailTemplate = DataEndpoint()
MessageInstruction = DataEndpoint()
MessageQueue = DataEndpoint()
NotificationFilter = DataEndpoint()
@register
class PlayerService(BaseService):
Player = Endpoint(name='p')
@register
class PlayerDataService(BaseService):
AccountSettings = DataEndpoint()
ColorScheme = DataEndpoint()
Layout = DataEndpoint()
Player = DataEndpoint()
PlugIn = DataEndpoint()
Skin = DataEndpoint()
@register
class ProductFeedsService(BaseService):
Feed = Endpoint(name='')
@register
class ProductDataService(BaseService):
AccountSettings = DataEndpoint()
PricingTemplate = DataEndpoint()
Product = DataEndpoint()
ProductTag = DataEndpoint()
Subscription = DataEndpoint()
@register
class PromotionDataService(BaseService):
Promotion = DataEndpoint()
PromotionAction = DataEndpoint()
PromotionCode = DataEndpoint()
PromotionCondition = DataEndpoint()
PromotionUseCount = DataEndpoint()
@register
class PublishDataService(BaseService):
Adapter = DataEndpoint()
AdapterConfiguration = DataEndpoint()
PublishProfile = DataEndpoint()
@register
class PublishService(BaseService):
Publish = BusinessEndpoint()
Sharing = BusinessEndpoint()
@register
class SelectorService(BaseService):
Selector = Endpoint(name='')
@register
class SharingDataService(BaseService):
OutletProfile = DataEndpoint()
ProviderAdapter = DataEndpoint()
@register
class SocialDataService(BaseService):
AccountSettings = DataEndpoint()
Comment = DataEndpoint()
Rating = DataEndpoint()
TotalRating = DataEndpoint()
@register
class AdminStorefrontService(BaseService):
Action = DataEndpoint()
Contract = DataEndpoint()
FulfillmentItem = DataEndpoint()
Order = DataEndpoint()
OrderFulfillment = DataEndpoint()
OrderHistory = DataEndpoint()
OrderItem = DataEndpoint()
OrderSummary = DataEndpoint()
PaymentInstrumentInfo = DataEndpoint()
Shipment = DataEndpoint()
Checkout = BusinessEndpoint(schema='1.5')
Payment = BusinessEndpoint(schema='1.1')
@register
class StorefrontService(BaseService):
Checkout = BusinessEndpoint(schema='1.4')
Payment = BusinessEndpoint(schema='1.1')
OrderHistory = DataEndpoint()
PaymentInstrumentInfo = DataEndpoint()
@register
class TaskService(BaseService):
Agent = DataEndpoint()
Batch = DataEndpoint()
Task = DataEndpoint()
TaskTemplate = DataEndpoint()
TaskType = DataEndpoint()
@register
class UserDataService(BaseService):
Directory = DataEndpoint()
Security = DataEndpoint()
User = DataEndpoint()
Authentication = BusinessEndpoint()
Lookup = BusinessEndpoint()
Self = BusinessEndpoint()
@register
class UserProfileDataService(BaseService):
AccountSettings = DataEndpoint()
TotalItem = DataEndpoint()
UserList = DataEndpoint()
UserListItem = DataEndpoint()
UserProfile = DataEndpoint()
@register
class ValidationService(BaseService):
Validation = BusinessEndpoint(schema='1.1')
@register
class ValidationDataService(BaseService):
ConditionalRule = DataEndpoint()
ValidationRule = DataEndpoint()
Validator = DataEndpoint()
@register
class WatchFolderDataService(BaseService):
registry_key = 'WatchFolder Data Service'
WatchFolder = DataEndpoint()
WatchFolderFile = DataEndpoint()
@register
class WorkflowDataService(BaseService):
ProfileResult = DataEndpoint()
ProfileStepResult = DataEndpoint()
WorkflowQueue = DataEndpoint()
| apache-2.0 | 8,157,886,516,609,446,000 | 23.452381 | 86 | 0.701168 | false |
TUM-AERIUS/Aerius | Raspberry/Stereo/photo-client.py | 1 | 1956 | import io
import socket
import struct
import time
import picamera
# Connect a client socket to my_server:8000 (change my_server to the
# hostname of your server)
client_socket = socket.socket()
client_socket.connect(('169.254.251.208', 8000))
# Make a file-like object out of the connection
connection = client_socket.makefile('rwb')
try:
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
# Start a preview and let the camera warm up for 2 seconds
camera.start_preview()
time.sleep(2)
# Note the start time and construct a stream to hold image data
# temporarily (we could write it directly to connection but in this
# case we want to find out the size of each capture first to keep
# our protocol simple)
start = time.time()
stream = io.BytesIO()
data = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
# openCv represents images in bgr format as NumPy arrays
for foo in camera.capture_continuous(stream, format="jpeg"):
# Write the length of the capture to the stream and flush to
# ensure it actually gets sent
connection.write(struct.pack('<L', stream.tell()))
connection.flush()
# Rewind the stream and send the image data over the wire
stream.seek(0)
connection.write(stream.read())
# If we've been capturing for more than 30 seconds, quit
if time.time() - start > 30:
break
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
data = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
if data == "e":
break
# Write a length of zero to the stream to signal we're done
connection.write(struct.pack('<L', 0))
finally:
connection.close()
client_socket.close() | mit | -2,041,568,765,425,169,400 | 35.240741 | 81 | 0.622188 | false |
lzkelley/zcode | zcode/math/statistic.py | 1 | 25108 | """General functions for mathematical and numerical operations.
Functions
---------
- confidence_bands - Bin by `xx` to calculate confidence intervals in `yy`.
- confidence_intervals - Compute the values bounding desired confidence intervals.
- cumstats - Calculate a cumulative average and standard deviation.
- log_normal_base_10 -
- percentiles -
- stats - Get basic statistics for the given array.
- stats_str - Return a string with the statistics of the given array.
- sigma - Convert from standard deviation to percentiles.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
import numpy as np
import scipy as sp
import scipy.stats # noqa
from zcode import utils
from zcode.math import math_core
__all__ = [
'confidence_bands', 'confidence_intervals',
'cumstats', 'frac_str', 'info', 'log_normal_base_10', 'mean',
'percs_from_sigma', 'quantiles', 'random_power', 'sigma',
'stats', 'stats_str', 'std',
'LH_Sampler',
# DEPRECATED
'percentiles'
]
def confidence_bands(xx, yy, xbins=10, xscale='lin', percs=[0.68, 0.95], filter=None):
"""Bin the given data with respect to `xx` and calculate confidence intervals in `yy`.
Arguments
---------
xx : array_like scalars
Data values for the axis by which to bin.
yy : array_like scalars
Data values for the axis in which to calculate confidence intervals, with values
corresponding to each of the `xx` values. Must have the same number of elements
as `xx`.
xbins : int or array_like of scalar
Specification for bins in `xx`. Either a
* int, describing the number of bins `N` to create automatically with scale `xscale`.
* array_like scalar, describing the `N+1` edges of each bin (left and right).
xscale : str
Specification of xbin scaling if bins are to be calculated automatically, {'lin', 'log'}.
Ignored if bin edges are given explicitly to `xbins`.
    percs : scalar or array_like of scalar
        The fractional confidence intervals to calculate (e.g. 0.5 for median).
        Must be between {0.0, 1.0}.
filter : str or `None`
Returns
-------
(for number of bins `N`)
count : (N,) array of int
The number of points in each xbin.
med : (N,) array of float
The median value of points in each bin
conf : array or ndarray of float
Values describing the confidence intervals.
        If a single value is given in `percs`, this will have shape (N,2);
        If `M` values are given in `percs`, this will have shape (N,M,2)
Where in each case the 0th and 1st element of the last dimension is the lower and upper
confidence bounds respectively.
xbins : (N+1,) array of float
Location of bin edges.
"""
squeeze = False
if not np.iterable(percs):
squeeze = True
percs = [percs]
xx = np.asarray(xx).flatten()
yy = np.asarray(yy).flatten()
if xx.shape != yy.shape:
errStr = "Shapes of `xx` and `yy` must match ('{}' vs. '{}'."
errStr = errStr.format(str(xx.shape), str(yy.shape))
raise ValueError(errStr)
# Filter based on whether `yy` values match `filter` comparison to 0.0
if filter is not None:
compFunc = math_core._comparison_function(filter)
inds = compFunc(yy, 0.0)
xx = xx[inds]
yy = yy[inds]
# Create bins
xbins = math_core.asBinEdges(xbins, xx, scale=xscale)
nbins = xbins.size - 1
# Find the entries corresponding to each bin
groups = math_core.groupDigitized(xx, xbins[1:], edges='right')
# Allocate storage for results
med = np.zeros(nbins)
conf = np.zeros((nbins, np.size(percs), 2))
count = np.zeros(nbins, dtype=int)
# Calculate medians and confidence intervals
for ii, gg in enumerate(groups):
count[ii] = np.size(gg)
if count[ii] == 0: continue
mm, cc = confidence_intervals(yy[gg], percs=percs)
med[ii] = mm
conf[ii, ...] = cc[...]
if squeeze:
conf = conf.squeeze()
return count, med, conf, xbins
def confidence_intervals(vals, sigma=None, percs=None, weights=None, axis=None,
filter=None, return_ci=False,
# DEPRECATED ARGUMENTS:
ci=None):
"""Compute the values bounding the target confidence intervals for an array of data.
Arguments
---------
vals : array_like of scalars
Data over which to calculate confidence intervals.
This can be an arbitrarily shaped ndarray.
sigma : (M,) array_like of float
Confidence values as standard-deviations, converted to percentiles.
percs : (M,) array_like of floats
List of desired confidence intervals as fractions (e.g. `[0.68, 0.95]`)
axis : int or None
Axis over which to calculate confidence intervals, or 'None' to marginalize over all axes.
filter : str or `None`
Filter the input array with a boolean comparison to zero.
If no values remain after filtering, ``NaN, NaN`` is returned.
return_ci : bool
Return the confidence-interval values used (i.e. percentiles)
ci : DEPRECATED, use `percs` instead
Returns
-------
med : scalar
Median of the input data.
`None` if there are no values (e.g. after filtering).
conf : ([L, ]M, 2) ndarray of scalar
Bounds for each confidence interval. Shape depends on the number of confidence intervals
passed in `percs`, and the input shape of `vals`.
`None` if there are no values (e.g. after filtering).
If `vals` is 1D or `axis` is 'None', then the output shape will be (M, 2).
If `vals` has more than one-dimension, and `axis` is not 'None', then the shape `L`
will be the shape of `vals`, minus the `axis` axis.
For example,
if ``vals.shape = (4,3,5)` and `axis=1`, then `L = (4,5)`
the final output shape will be: (4,5,M,2).
percs : (M,) ndarray of float, optional
The percentile-values used for calculating confidence intervals.
Only returned if `return_ci` is True.
"""
percs = utils.dep_warn_var("ci", ci, "percs", percs)
if percs is not None and sigma is not None:
raise ValueError("Only provide *either* `percs` or `sigma`!")
if percs is None:
if sigma is None:
sigma = [1.0, 2.0, 3.0]
percs = percs_from_sigma(sigma)
percs = np.atleast_1d(percs)
if np.any(percs < 0.0) or np.all(percs > 1.0):
raise ValueError("`percs` must be [0.0, 1.0]! {}".format(stats_str(percs)))
# PERC_FUNC = np.percentile
def PERC_FUNC(xx, pp, **kwargs):
return quantiles(xx, pp/100.0, weights=weights, **kwargs)
# Filter input values
if filter is not None:
# Using the filter will flatten the array, so `axis` wont work...
kw = {}
if (axis is not None) and np.ndim(vals) > 1:
kw['axis'] = axis
if weights is not None:
raise NotImplementedError("`weights` argument does not work with `filter`!")
vals = math_core.comparison_filter(vals, filter, mask=True) # , **kw)
# vals = np.ma.filled(vals, np.nan)
# PERC_FUNC = np.nanpercentile # noqa
if vals.size == 0:
return np.nan, np.nan
# Calculate confidence-intervals and median
cdf_vals = np.array([(1.0-percs)/2.0, (1.0+percs)/2.0]).T
# This produces an ndarray with shape `[M, 2(, L)]`
# If ``axis is None`` or `np.ndim(vals) == 1` then the shape will be simply `[M, 2]`
# Otherwise, `L` will be the shape of `vals` without axis `axis`.
conf = [[PERC_FUNC(vals, 100.0*cdf[0], axis=axis),
PERC_FUNC(vals, 100.0*cdf[1], axis=axis)]
for cdf in cdf_vals]
conf = np.array(conf)
# Reshape from `[M, 2, L]` to `[L, M, 2]`
if (np.ndim(vals) > 1) and (axis is not None):
conf = np.moveaxis(conf, -1, 0)
med = PERC_FUNC(vals, 50.0, axis=axis)
if len(conf) == 1:
conf = conf[0]
if return_ci:
return med, conf, percs
return med, conf
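# Illustrative shape check: for `vals` of shape (4, 3, 5) with axis=1 and
# sigma=[1.0, 2.0], `med` comes back with shape (4, 5) and `conf` with shape
# (4, 5, 2, 2), matching the (L, M, 2) convention described in the docstring.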
def cumstats(arr):
"""Calculate a cumulative average and standard deviation.
Arguments
---------
arr <flt>[N] : input array
Returns
-------
ave <flt>[N] : cumulative average over ``arr``
std <flt>[N] : cumulative standard deviation over ``arr``
"""
tot = len(arr)
num = np.arange(tot)
std = np.zeros(tot)
# Cumulative sum
sm1 = np.cumsum(arr)
# Cumulative sum of squares
sm2 = np.cumsum(np.square(arr))
# Cumulative average
ave = sm1/(num+1.0)
std[1:] = np.fabs(sm2[1:] - np.square(sm1[1:])/(num[1:]+1.0))/num[1:]
std[1:] = np.sqrt(std[1:])
return ave, std
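# Worked example (values verified by hand): cumstats([1.0, 2.0, 3.0]) returns
# ave = [1.0, 1.5, 2.0] and std = [0.0, ~0.707, 1.0], i.e. the running mean and
# running sample standard deviation of the first k elements.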
def frac_str(num, den=None, frac_fmt=None, dec_fmt=None):
"""Create a string of the form '{}/{} = {}' for reporting fractional values.
"""
if den is None:
assert num.dtype == bool, "If no `den` is given, array must be boolean!"
den = num.size
num = np.count_nonzero(num)
try:
dec_frac = num / den
except ZeroDivisionError:
dec_frac = np.nan
if frac_fmt is None:
frac_exp = np.fabs(np.log10([num, den]))
if np.any(frac_exp >= 4):
frac_fmt = ".1e"
else:
frac_fmt = "d"
if dec_fmt is None:
dec_exp = np.fabs(np.log10(dec_frac))
if dec_exp > 3:
dec_fmt = ".3e"
else:
dec_fmt = ".4f"
fstr = "{num:{ff}}/{den:{ff}} = {frac:{df}}".format(
num=num, den=den, frac=dec_frac, ff=frac_fmt, df=dec_fmt)
return fstr
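# Illustrative check: frac_str(25, 100) -> '25/100 = 0.2500', and passing a
# boolean array with no `den` reports the count of True values over the size.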
def info(array, shape=True, sample=3, stats=True):
rv = ""
if shape:
rv += "{} ".format(np.shape(array))
if (sample is not None) and (sample > 0):
rv += "{} ".format(math_core.str_array(array, sides=sample))
if stats:
rv += "{} ".format(stats_str(array, label=False))
return rv
def log_normal_base_10(mu, sigma, size=None, shift=0.0):
"""Draw from a lognormal distribution with values in base-10 (instead of e).
Arguments
---------
mu : (N,) scalar
Mean of the distribution in linear space (e.g. 1.0e8 instead of 8.0).
sigma : (N,) scalar
Variance of the distribution *in dex* (e.g. 1.0 means factor of 10.0 variance)
size : (M,) int
Desired size of sample.
Returns
-------
dist : (M,...) scalar
Resulting distribution of values (in linear space).
"""
_sigma = np.log(10**sigma)
dist = np.random.lognormal(np.log(mu) + shift*np.log(10.0), _sigma, size)
return dist
def mean(vals, weights=None, **kwargs):
if weights is None:
return np.mean(vals, **kwargs)
ave = np.sum(vals*weights, **kwargs) / np.sum(weights, **kwargs)
return ave
def percentiles(*args, **kwargs):
utils.dep_warn("percentiles", newname="quantiles")
return quantiles(*args, **kwargs)
def quantiles(values, percs=None, sigmas=None, weights=None, axis=None,
values_sorted=False, filter=None):
"""Compute weighted percentiles.
Copied from @Alleo answer: http://stackoverflow.com/a/29677616/230468
NOTE: if `values` is a masked array, then only unmasked values are used!
Arguments
---------
values: (N,)
input data
percs: (M,) scalar [0.0, 1.0]
Desired percentiles of the data.
weights: (N,) or `None`
Weighted for each input data point in `values`.
values_sorted: bool
If True, then input values are assumed to already be sorted.
Returns
-------
percs : (M,) float
Array of percentiles of the weighted input data.
"""
if filter is not None:
values = math_core.comparison_filter(values, filter)
if not isinstance(values, np.ma.MaskedArray):
values = np.asarray(values)
if percs is None:
percs = sp.stats.norm.cdf(sigmas)
if np.ndim(values) > 1:
if axis is None:
values = values.flatten()
else:
if axis is not None:
raise ValueError("Cannot act along axis '{}' for 1D data!".format(axis))
percs = np.array(percs)
if weights is None:
weights = np.ones_like(values)
weights = np.array(weights)
try:
weights = np.ma.masked_array(weights, mask=values.mask)
except AttributeError:
pass
assert np.all(percs >= 0.0) and np.all(percs <= 1.0), 'percentiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values, axis=axis)
values = np.take_along_axis(values, sorter, axis=axis)
weights = np.take_along_axis(weights, sorter, axis=axis)
if axis is None:
weighted_quantiles = np.cumsum(weights) - 0.5 * weights
weighted_quantiles /= np.sum(weights)
percs = np.interp(percs, weighted_quantiles, values)
return percs
weights = np.moveaxis(weights, axis, -1)
values = np.moveaxis(values, axis, -1)
weighted_quantiles = np.cumsum(weights, axis=-1) - 0.5 * weights
weighted_quantiles /= np.sum(weights, axis=-1)[..., np.newaxis]
# weighted_quantiles = np.moveaxis(weighted_quantiles, axis, -1)
percs = [np.interp(percs, weighted_quantiles[idx], values[idx])
for idx in np.ndindex(values.shape[:-1])]
percs = np.array(percs)
return percs
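# Illustrative check: quantiles([1.0, 2.0, 3.0, 4.0], percs=[0.5]) interpolates
# to 2.5 with uniform weights, while weights=[1, 1, 1, 5] shifts the weighted
# median to 3.5.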
def percs_from_sigma(sigma, side='in', boundaries=False):
"""Convert from standard deviation 'sigma' to percentiles in/out-side the normal distribution.
Arguments
---------
sig : (N,) array_like scalar
Standard deviations.
side : str, {'in', 'out'}
Calculate percentiles inside (i.e. [-sig, sig]) or ouside (i.e. [-inf, -sig] U [sig, inf])
boundaries : bool
Whether boundaries should be given ('True'), or the area ('False').
Returns
-------
vals : (N,) array_like scalar
Percentiles corresponding to the input `sig`.
"""
if side.startswith('in'):
inside = True
elif side.startswith('out'):
inside = False
else:
raise ValueError("`side` = '{}' must be {'in', 'out'}.".format(side))
# From CDF from -inf to `sig`
cdf = sp.stats.norm.cdf(sigma)
# Area outside of [-sig, sig]
vals = 2.0 * (1.0 - cdf)
# Convert to area inside [-sig, sig]
if inside:
vals = 1.0 - vals
# Convert from area to locations of boundaries (fractions)
if boundaries:
if inside:
vlo = 0.5*(1 - vals)
vhi = 0.5*(1 + vals)
else:
vlo = 0.5*vals
vhi = 1.0 - 0.5*vals
return vlo, vhi
return vals
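# Illustrative check (values rounded): percs_from_sigma(1.0) ~ 0.683 and
# percs_from_sigma(2.0) ~ 0.954 for the area inside [-sig, sig], while
# percs_from_sigma(1.0, boundaries=True) ~ (0.159, 0.841).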
def random_power(extr, pdf_index, size=1, **kwargs):
"""Draw from power-law PDF with the given extrema and index.
Arguments
---------
extr : array_like scalar
The minimum and maximum value of this array are used as extrema.
pdf_index : scalar
The power-law index of the PDF distribution to be drawn from. Any real number is valid,
positive or negative.
NOTE: the `numpy.random.power` function uses the power-law index of the CDF, i.e. `g+1`
size : scalar
The number of points to draw (cast to int).
    **kwargs : dict pairs
Additional arguments passed to `zcode.math_core.minmax` with `extr`.
Returns
-------
rv : (N,) scalar
Array of random variables with N=`size` (default, size=1).
"""
# if not np.isscalar(pdf_index):
# err = "`pdf_index` (shape {}; {}) must be a scalar value!".format(
# np.shape(pdf_index), pdf_index)
# raise ValueError(err)
extr = math_core.minmax(extr, filter='>', **kwargs)
if pdf_index == -1:
rv = 10**np.random.uniform(*np.log10(extr), size=int(size))
else:
rr = np.random.random(size=int(size))
gex = extr ** (pdf_index+1)
rv = (gex[0] + (gex[1] - gex[0])*rr) ** (1./(pdf_index+1))
return rv
def sigma(*args, **kwargs):
# ---- DECPRECATION SECTION ----
utils.dep_warn("sigma", newname="percs_from_sigma")
# ------------------------------
return percs_from_sigma(*args, **kwargs)
def stats(vals, median=False):
"""Get basic statistics for the given array.
Arguments
---------
vals <flt>[N] : input array
median <bool> : include median in return values
Returns
-------
ave <flt>
std <flt>
[med <flt>] : median, returned if ``median`` is `True`
"""
ave = np.average(vals)
std = np.std(vals)
if(median):
med = np.median(vals)
return ave, std, med
return ave, std
def stats_str(data, percs=[0.0, 0.16, 0.50, 0.84, 1.00], ave=False, std=False, weights=None,
format=None, log=False, label=True, label_log=True, filter=None):
"""Return a string with the statistics of the given array.
Arguments
---------
data : ndarray of scalar
Input data from which to calculate statistics.
percs : array_like of scalars in {0, 100}
Which percentiles to calculate.
ave : bool
Include average value in output.
std : bool
Include standard-deviation in output.
format : str
Formatting for all numerical output, (e.g. `":.2f"`).
log : bool
Convert values to log10 before printing.
label : bool
Add label for which percentiles are being printed
label_log : bool
If `log` is also true, append a string saying these are log values.
Output
------
out : str
Single-line string of the desired statistics.
"""
# data = np.array(data).astype(np.float)
data = np.array(data)
if filter is not None:
data = math_core.comparison_filter(data, filter)
if np.size(data) == 0:
return "empty after filtering"
if log:
data = np.log10(data)
percs = np.atleast_1d(percs)
if np.any(percs > 1.0):
warnings.warn("WARNING: zcode.math.statistic: input `percs` should be [0.0, 1.0], "
"dividing these by 100.0!")
percs /= 100.0
percs_flag = False
if (percs is not None) and len(percs):
percs_flag = True
out = ""
if format is None:
allow_int = False if (ave or std) else True
format = math_core._guess_str_format_from_range(data, allow_int=allow_int)
# If a `format` is given, but missing the colon, add the colon
if len(format) and not format.startswith(':'):
format = ':' + format
form = "{{{}}}".format(format)
# Add average
if ave:
out += "ave = " + form.format(np.average(data))
if std or percs_flag:
out += ", "
# Add standard-deviation
if std:
out += "std = " + form.format(np.std(data))
if percs_flag:
out += ", "
# Add percentiles
if percs_flag:
tiles = quantiles(data, percs, weights=weights).astype(data.dtype)
out += "(" + ", ".join(form.format(tt) for tt in tiles) + ")"
if label:
out += ", for (" + ", ".join("{:.0f}%".format(100*pp) for pp in percs) + ")"
# Note if these are log-values
if log and label_log:
out += " (log values)"
return out
def std(vals, weights=None, **kwargs):
"""
See: https://www.itl.nist.gov/div898/software/dataplot/refman2/ch2/weightsd.pdf
"""
if weights is None:
return np.std(vals, **kwargs)
mm = np.count_nonzero(weights)
ave = mean(vals, weights=weights, **kwargs)
num = np.sum(weights * (vals - ave)**2)
den = np.sum(weights) * (mm - 1) / mm
std = np.sqrt(num/den)
return std
class LH_Sampler:
"""
Much of this code was taken from the pyDOE project:
- https://github.com/tisimst/pyDOE
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
'''
@classmethod
def oversample(cls, npar, nsamp, oversamp, **kwargs):
if not isinstance(oversamp, int) or oversamp < 1:
raise ValueError(f"`oversamp` argument '{oversamp}' must be an integer!")
samples = None
for ii in range(oversamp):
ss = cls.sample(npar, nsamp=nsamp, **kwargs)
if samples is None:
samples = ss
else:
samples = np.append(samples, ss, axis=-1)
return samples
'''
@classmethod
def sample(cls, vals, nsamp=None, **kwargs):
if isinstance(vals, int):
return cls.sample_unit(vals, nsamp=nsamp, **kwargs)
return cls.sample_vals(vals, nsamp=nsamp, **kwargs)
@classmethod
def sample_vals(cls, vals, nsamp=None, log=False, **kwargs):
vals = np.asarray(vals)
try:
npar, check = np.shape(vals)
if (check != 2) or (npar < 2):
raise ValueError
except ValueError:
print(f"vals = {vals}")
raise ValueError(f"Shape of `vals` ({np.shape(vals)}) must be (N,2)!")
if np.isscalar(log):
log = [log] * npar
if np.any([ll not in [True, False] for ll in log]):
raise ValueError(f"`log` value(s) must be 'True' or 'False'!")
# Draw samples in [0.0, 1.0]
samps = cls.sample_unit(npar, nsamp=nsamp, **kwargs)
# Map samples to the given ranges in log or linear space
for ii, vv in enumerate(vals):
if log[ii]:
vv = np.log10(vv)
# temp = np.copy(samps[ii, :])
# samps[ii, :] *= (vv.max() - vv.min())
# samps[ii, :] += vv.min()
samps[ii, :] = (vv.max() - vv.min()) * samps[ii, :] + vv.min()
if log[ii]:
samps[ii, :] = 10.0 ** samps[ii, :]
vv = 10.0 ** vv
# if np.any((samps[ii] < vv.min()) | (samps[ii] > vv.max())):
# print(f"temp = {temp}")
# print(f"vv = {vv}")
# err = (
# f"Samples ({stats_str(samps[ii])}) exceeded "
# f"values ({math_core.minmax(vv)})"
# )
# raise ValueError(err)
return samps
@classmethod
def sample_unit(cls, npar, nsamp=None, center=False, optimize=None, iterations=10):
if nsamp is None:
nsamp = npar
# Construct optimization variables/functions
optimize = None if (optimize is None) else optimize.lower()
if optimize is not None:
if optimize.startswith('dist'):
extr = 0.0
mask = np.ones((nsamp, nsamp), dtype=bool)
comp = np.less
# Minimum euclidean distance between points
def metric(xx):
dist = (xx[:, np.newaxis, :] - xx[:, :, np.newaxis])**2
dist = np.sum(dist, axis=0)
return np.min(dist[mask])
elif optimize.startswith('corr'):
extr = np.inf
mask = np.ones((npar, npar), dtype=bool)
comp = np.greater
# Maximum correlation
metric = lambda xx: np.max(np.abs(np.corrcoef(xx)[mask]))
np.fill_diagonal(mask, False)
# iterate over randomizations
for ii in range(iterations):
cand = cls._sample(npar, nsamp, center=center)
if optimize is None:
samples = cand
break
# -- Optimize
# Calculate the metric being optimized
met = metric(cand)
# Compare the metric to the previous extrema and store new values if better
if comp(extr, met):
extr = met
samples = cand
return samples
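    # Illustrative usage (an example, not part of the original API docs): draw
    # 10 Latin-hypercube samples over two parameters, the first linear in
    # [0, 1] and the second log-spaced in [1e-2, 1e2]:
    #   samps = LH_Sampler.sample([[0.0, 1.0], [1e-2, 1e2]], nsamp=10,
    #                             log=[False, True], optimize='dist')
    #   # samps.shape == (2, 10)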
@classmethod
def _sample(cls, npar, nsamp, center=False):
# Generate the intervals
cut = np.linspace(0, 1, nsamp + 1)
lo = cut[:-1]
hi = cut[1:]
# Fill points uniformly in each interval
shape = (npar, nsamp) # , nreals)
if center:
points = np.zeros(shape)
points[...] = 0.5 * (lo + hi)[np.newaxis, :]
else:
points = np.random.uniform(size=shape)
points = points * (hi - lo)[np.newaxis, :] + lo[np.newaxis, :]
for j in range(npar):
points[j, :] = np.random.permutation(points[j, :])
return points
| mit | -1,670,735,747,756,827,600 | 31.439276 | 98 | 0.57416 | false |
marcok/odoo_modules | hr_employee_time_clock/migrations/11.0.0.0.13/post-migrate.py | 1 | 2402 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import api, fields, models, SUPERUSER_ID, _
from dateutil import rrule, parser
import pytz
from datetime import datetime, date, timedelta
import calendar
import math
import logging
_logger = logging.getLogger(__name__)
def migrate(cr, version):
"""
    This migration recalculates the running time for each active employee and
    writes it into the last attendance that has a check-out. It matters for
    companies that already used the Employee Time Clock module before upgrading.
"""
env = api.Environment(cr, SUPERUSER_ID, {})
employee_ids = env['hr.employee'].search([('active', '=', True)])
i = len(employee_ids)
analytic = env['employee.attendance.analytic']
analytic.search([]).unlink()
for employee in employee_ids:
_logger.info('\n')
_logger.info(i)
_logger.info(employee.name)
sheets = env['hr_timesheet_sheet.sheet'].search(
[('employee_id', '=', employee.id)])
for sheet in sheets:
analytic.create_line(
sheet, sheet.date_from, sheet.date_to)
attendances = env['hr.attendance'].search(
[('sheet_id', '=', sheet.id)])
for attendance in attendances:
if attendance.check_out:
analytic.recalculate_line_worktime(
attendance, {'check_out': attendance.check_out})
i -= 1
| agpl-3.0 | -8,948,538,899,177,939,000 | 37.126984 | 84 | 0.609492 | false |
valesi/electrum | lib/daemon.py | 1 | 6276 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast, os
import jsonrpclib
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer, SimpleJSONRPCRequestHandler
from util import json_decode, DaemonThread
from wallet import WalletStorage, Wallet
from wizard import WizardBase
from commands import known_commands, Commands
from simple_config import SimpleConfig
def lockfile(config):
return os.path.join(config.path, 'daemon')
def get_daemon(config):
try:
with open(lockfile(config)) as f:
host, port = ast.literal_eval(f.read())
except:
return
server = jsonrpclib.Server('http://%s:%d' % (host, port))
# check if daemon is running
try:
server.ping()
return server
except:
pass
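# Illustrative note: the lockfile written by Daemon.__init__ below holds the
# repr of the server socket address, e.g. "('127.0.0.1', 54321)" (address and
# port here are just examples), which get_daemon() parses via ast.literal_eval.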
class RequestHandler(SimpleJSONRPCRequestHandler):
def do_OPTIONS(self):
self.send_response(200)
self.end_headers()
def end_headers(self):
self.send_header("Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept")
self.send_header("Access-Control-Allow-Origin", "*")
SimpleJSONRPCRequestHandler.end_headers(self)
class Daemon(DaemonThread):
def __init__(self, config, network):
DaemonThread.__init__(self)
self.config = config
self.network = network
self.gui = None
self.wallets = {}
self.wallet = None
self.cmd_runner = Commands(self.config, self.wallet, self.network)
host = config.get('rpchost', 'localhost')
port = config.get('rpcport', 0)
self.server = SimpleJSONRPCServer((host, port), requestHandler=RequestHandler, logRequests=False)
with open(lockfile(config), 'w') as f:
f.write(repr(self.server.socket.getsockname()))
self.server.timeout = 0.1
for cmdname in known_commands:
self.server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
self.server.register_function(self.run_cmdline, 'run_cmdline')
self.server.register_function(self.ping, 'ping')
self.server.register_function(self.run_daemon, 'daemon')
self.server.register_function(self.run_gui, 'gui')
def ping(self):
return True
def run_daemon(self, config):
sub = config.get('subcommand')
assert sub in ['start', 'stop', 'status']
if sub == 'start':
response = "Daemon already running"
elif sub == 'status':
p = self.network.get_parameters()
response = {
'path': self.network.config.path,
'server': p[0],
'blockchain_height': self.network.get_local_height(),
'server_height': self.network.get_server_height(),
'nodes': self.network.get_interfaces(),
'connected': self.network.is_connected(),
'auto_connect': p[4],
'wallets': dict([ (k, w.is_up_to_date()) for k, w in self.wallets.items()]),
}
elif sub == 'stop':
self.stop()
response = "Daemon stopped"
return response
def run_gui(self, config_options):
config = SimpleConfig(config_options)
if self.gui:
if hasattr(self.gui, 'new_window'):
path = config.get_wallet_path()
self.gui.new_window(path, config.get('url'))
response = "ok"
else:
response = "error: current GUI does not support multiple windows"
else:
response = "Error: Electrum is running in daemon mode. Please stop the daemon first."
return response
def load_wallet(self, path, get_wizard=None):
if path in self.wallets:
wallet = self.wallets[path]
else:
storage = WalletStorage(path)
if get_wizard:
if storage.file_exists:
wallet = Wallet(storage)
action = wallet.get_action()
else:
action = 'new'
if action:
wizard = get_wizard()
wallet = wizard.run(self.network, storage)
else:
wallet.start_threads(self.network)
else:
wallet = Wallet(storage)
wallet.start_threads(self.network)
if wallet:
self.wallets[path] = wallet
return wallet
def run_cmdline(self, config_options):
config = SimpleConfig(config_options)
cmdname = config.get('cmd')
cmd = known_commands[cmdname]
path = config.get_wallet_path()
wallet = self.load_wallet(path) if cmd.requires_wallet else None
# arguments passed to function
args = map(lambda x: config.get(x), cmd.params)
# decode json arguments
args = map(json_decode, args)
# options
args += map(lambda x: config.get(x), cmd.options)
cmd_runner = Commands(config, wallet, self.network,
password=config_options.get('password'),
new_password=config_options.get('new_password'))
func = getattr(cmd_runner, cmd.name)
result = func(*args)
return result
def run(self):
while self.is_running():
self.server.handle_request()
os.unlink(lockfile(self.config))
def stop(self):
for k, wallet in self.wallets.items():
wallet.stop_threads()
DaemonThread.stop(self)
| gpl-3.0 | 5,581,530,443,373,276,000 | 35.068966 | 105 | 0.597674 | false |
rajalokan/nova | nova/policies/server_groups.py | 1 | 2174 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-server-groups'
POLICY_ROOT = 'os_compute_api:os-server-groups:%s'
BASE_POLICY_RULE = 'rule:%s' % BASE_POLICY_NAME
server_groups_policies = [
    # TODO(Kevin_Zheng): remove this rule as it is not used by any API
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_OR_OWNER),
base.create_rule_default(
POLICY_ROOT % 'create',
BASE_POLICY_RULE,
"Create a new server group",
[
{
'path': '/os-server-groups',
'method': 'POST'
}
]
),
base.create_rule_default(
POLICY_ROOT % 'delete',
BASE_POLICY_RULE,
"Delete a server group",
[
{
'path': '/os-server-groups/{server_group_id}',
'method': 'DELETE'
}
]
),
base.create_rule_default(
POLICY_ROOT % 'index',
BASE_POLICY_RULE,
"List all server groups",
[
{
'path': '/os-server-groups',
'method': 'GET'
}
]
),
base.create_rule_default(
POLICY_ROOT % 'show',
BASE_POLICY_RULE,
"Show details of a server group",
[
{
'path': '/os-server-groups/{server_group_id}',
'method': 'GET'
}
]
),
]
def list_rules():
return server_groups_policies
| apache-2.0 | -7,074,195,272,985,386,000 | 26.518987 | 78 | 0.561638 | false |
cmcantalupo/geopm | scripts/test/TestPolicyStoreIntegration.py | 1 | 3364 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
import unittest
import geopmpy.policy_store
import geopmpy.version
class TestPolicyStoreIntegration(unittest.TestCase):
@unittest.skipIf(not geopmpy.version.__beta__, "PolicyStoreIntegration requires beta features")
def test_all_interfaces(self):
geopmpy.policy_store.connect(':memory:')
geopmpy.policy_store.set_best('frequency_map', 'p1', [0.5, 1])
geopmpy.policy_store.set_default('frequency_map', [2, 4])
with self.assertRaises(RuntimeError):
geopmpy.policy_store.set_default('invalid_agent', [])
with self.assertRaises(RuntimeError):
geopmpy.policy_store.set_default('monitor', [0.5])
with self.assertRaises(RuntimeError):
geopmpy.policy_store.set_best('invalid_agent', 'pinv', [])
with self.assertRaises(RuntimeError):
geopmpy.policy_store.set_best('monitor', 'pinv', [0.5])
self.assertEqual([0.5, 1], geopmpy.policy_store.get_best('frequency_map', 'p1'))
self.assertEqual([2, 4], geopmpy.policy_store.get_best('frequency_map', 'p2'))
with self.assertRaises(RuntimeError):
geopmpy.policy_store.get_best('power_balancer', 'p2')
geopmpy.policy_store.disconnect()
# Attempt accesses to a closed connection
with self.assertRaises(RuntimeError):
geopmpy.policy_store.set_best('frequency_map', 'p1', [0.5, 1])
with self.assertRaises(RuntimeError):
geopmpy.policy_store.set_default('frequency_map', [2, 4])
with self.assertRaises(RuntimeError):
self.assertEqual([0.5, 1], geopmpy.policy_store.get_best('frequency_map', 'p1'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 7,266,879,725,349,241,000 | 43.853333 | 99 | 0.699762 | false |
smvv/trs | src/rules/fractions.py | 1 | 14707 | # This file is part of TRS (http://math.kompiler.org)
#
# TRS is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TRS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with TRS. If not, see <http://www.gnu.org/licenses/>.
from itertools import combinations, product, ifilterfalse
from .utils import least_common_multiple, partition, is_numeric_node, \
evals_to_numeric
from ..node import ExpressionNode as N, ExpressionLeaf as L, Scope, OP_DIV, \
OP_ADD, OP_MUL, negate
from ..possibilities import Possibility as P, MESSAGES
from ..translate import _
from .negation import negate_polynome
def match_constant_division(node):
"""
a / 0 -> Division by zero
a / 1 -> a
0 / a -> 0
a / a -> 1
"""
assert node.is_op(OP_DIV)
p = []
nominator, denominator = node
# a / 0
if denominator == 0:
raise ZeroDivisionError('Division by zero: %s.' % node)
# a / 1
if denominator == 1:
p.append(P(node, division_by_one, (nominator,)))
# 0 / a
if nominator == 0:
p.append(P(node, division_of_zero, (denominator,)))
# a / a
if nominator == denominator:
p.append(P(node, division_by_self, (nominator,)))
return p
def division_by_one(root, args):
"""
a / 1 -> a
"""
return args[0].negate(root.negated)
MESSAGES[division_by_one] = _('Division by `1` yields the nominator.')
def division_of_zero(root, args):
"""
0 / a -> 0
"""
return L(0, negated=root.negated)
MESSAGES[division_of_zero] = _('Division of `0` by {1} reduces to `0`.')
def division_by_self(root, args):
"""
a / a -> 1
"""
return L(1, negated=root.negated)
MESSAGES[division_by_self] = _('Division of {1} by itself reduces to `1`.')
def match_add_fractions(node):
"""
a / b + c / b and a, c in Z -> (a + c) / b
a / b + c / d and a, b, c, d in Z -> a' / e + c' / e # e = lcm(b, d)
# | e = b * d
a / b + c and a, b, c in Z -> a / b + (bc) / b # =>* (a + bc) / b
"""
assert node.is_op(OP_ADD)
p = []
scope = Scope(node)
fractions, others = partition(lambda n: n.is_op(OP_DIV), scope)
numerics = filter(is_numeric_node, others)
for ab, cd in combinations(fractions, 2):
a, b = ab
c, d = cd
if b == d:
# Equal denominators, add nominators to create a single fraction
p.append(P(node, add_nominators, (scope, ab, cd)))
elif all(map(is_numeric_node, (a, b, c, d))):
# Denominators are both numeric, rewrite both fractions to the
# least common multiple of their denominators. Later, the
# nominators will be added
lcm = least_common_multiple(b.value, d.value)
p.append(P(node, equalize_denominators, (scope, ab, cd, lcm)))
# Also, add the (non-recommended) possibility to multiply the
# denominators. Do this only if the multiplication is not equal to
# the least common multiple, to avoid duplicate possibilities
mult = b.value * d.value
if mult != lcm:
p.append(P(node, equalize_denominators, (scope, ab, cd, mult)))
for ab, c in product(fractions, numerics):
a, b = ab
if a.is_numeric() and b.is_numeric():
# Fraction of constants added to a constant -> create a single
# constant fraction
p.append(P(node, constant_to_fraction, (scope, ab, c)))
return p
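# Worked numeric illustrations of the three cases matched above:
#
#   1/4 + 3/4  ->  (1 + 3) / 4              # add_nominators
#   1/4 + 1/6  ->  3/12 + 2/12              # equalize_denominators, lcm(4, 6) = 12
#   1/4 + 2    ->  1/4 + (4 * 2) / 4        # constant_to_fraction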
def add_nominators(root, args):
"""
a / b + c / b and a, c in Z -> (a + c) / b
"""
scope, ab, cb = args
a, b = ab
c = cb[0]
# Replace the left node with the new expression, transfer fraction
# negations to nominators
scope.replace(ab, (a.negate(ab.negated) + c.negate(cb.negated)) / b)
scope.remove(cb)
return scope.as_nary_node()
MESSAGES[add_nominators] = \
_('Add the nominators of {2} and {3} to create a single fraction.')
def equalize_denominators(root, args):
"""
a / b + c / d and a, b, c, d in Z -> a' / e + c' / e
"""
scope, denom = args[::3]
for fraction in args[1:3]:
n, d = fraction
mult = denom / d.value
if mult != 1:
if n.is_numeric():
nom = L(n.value * mult)
else:
nom = L(mult) * n
scope.replace(fraction, negate(nom / L(d.value * mult),
fraction.negated))
return scope.as_nary_node()
MESSAGES[equalize_denominators] = \
_('Equalize the denominators of divisions {2} and {3} to {4}.')
def constant_to_fraction(root, args):
"""
a / b + c and a, b, c in Z -> a / b + (bc) / b # =>* (a + bc) / b
"""
scope, ab, c = args
b = ab[1]
scope.replace(c, b * c / b)
return scope.as_nary_node()
MESSAGES[constant_to_fraction] = \
_('Rewrite constant {3} to a fraction to be able to add it to {2}.')
def match_multiply_fractions(node):
"""
a / b * c / d -> (ac) / (bd)
a / b * c and (eval(c) in Z or eval(a / b) not in Z) -> (ac) / b
"""
assert node.is_op(OP_MUL)
p = []
scope = Scope(node)
fractions, others = partition(lambda n: n.is_op(OP_DIV), scope)
for ab, cd in combinations(fractions, 2):
p.append(P(node, multiply_fractions, (scope, ab, cd)))
for ab, c in product(fractions, others):
if evals_to_numeric(c) or not evals_to_numeric(ab):
p.append(P(node, multiply_with_fraction, (scope, ab, c)))
return p
def multiply_fractions(root, args):
"""
a / b * (c / d) -> ac / (bd)
"""
scope, ab, cd = args
a, b = ab
c, d = cd
scope.replace(ab, (a * c / (b * d)).negate(ab.negated + cd.negated))
scope.remove(cd)
return scope.as_nary_node()
MESSAGES[multiply_fractions] = _('Multiply fractions {2} and {3}.')
def multiply_with_fraction(root, args):
"""
a / b * c and (eval(c) in Z or eval(a / b) not in Z) -> (ac) / b
"""
scope, ab, c = args
a, b = ab
if scope.index(ab) < scope.index(c):
nominator = a * c
else:
nominator = c * a
scope.replace(ab, negate(nominator / b, ab.negated))
scope.remove(c)
return scope.as_nary_node()
MESSAGES[multiply_with_fraction] = \
_('Multiply {3} with the nominator of fraction {2}.')
def match_divide_fractions(node):
"""
Reduce divisions of fractions to a single fraction.
Examples:
a / b / c -> a / (bc)
a / (b / c) -> ac / b
Note that:
    a / b / (c / d) =>* ad / (bc)
"""
assert node.is_op(OP_DIV)
nom, denom = node
p = []
if nom.is_op(OP_DIV):
p.append(P(node, divide_fraction, tuple(nom) + (denom,)))
if denom.is_op(OP_DIV):
p.append(P(node, divide_by_fraction, (nom,) + tuple(denom)))
return p
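# Sketch of how these rules chain on a nested fraction such as (a/b) / (c/d):
#
#   (a / b) / (c / d)  ->  a / (b * (c / d))    # divide_fraction
#                      ->  a / ((b * c) / d)    # multiply_with_fraction
#                      ->  (a * d) / (b * c)    # divide_by_fraction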
def divide_fraction(root, args):
"""
a / b / c -> a / (bc)
"""
(a, b), c = root
return negate(a / (b * c), root.negated)
MESSAGES[divide_fraction] = \
_('Move {3} to denominator of fraction `{1} / {2}`.')
def divide_by_fraction(root, args):
"""
a / (b / c) -> ac / b
"""
a, bc = root
b, c = bc
return negate(a * c / b, root.negated + bc.negated)
MESSAGES[divide_by_fraction] = \
_('Move {3} to the nominator of fraction `{1} / {2}`.')
def is_power_combination(a, b):
"""
Check if two nodes are powers that can be combined in a fraction, for
example:
a and a^2
a^2 and a^2
a^2 and a
"""
if a.is_power():
a = a[0]
if b.is_power():
b = b[0]
return a == b
def mult_scope(node):
"""
    Get the multiplication scope of a node that may or may not be a
multiplication itself.
"""
if node.is_op(OP_MUL):
return Scope(node)
return Scope(N(OP_MUL, node))
def remove_from_mult_scope(scope, node):
if len(scope) == 1:
scope.replace(node, L(1))
else:
scope.remove(node)
return scope.as_nary_node()
def match_extract_fraction_terms(node):
"""
Divide nominator and denominator by the same part. If the same root of a
power appears in both nominator and denominator, also extract it so that it
can be reduced to a single power by power division rules.
Examples:
    ab / (ac) -> a / a * (b / c)          # =>* b / c
a ^ b * c / (a ^ d * e) -> a ^ b / a ^ d * (c / e) # -> a^(b - d)(c / e)
ac / b and eval(c) not in Z and eval(a / b) in Z -> a / b * c
"""
assert node.is_op(OP_DIV)
n_scope, d_scope = map(mult_scope, node)
p = []
nominator, denominator = node
# ac / b
for n in ifilterfalse(evals_to_numeric, n_scope):
a_scope = mult_scope(nominator)
#a = remove_from_mult_scope(a_scope, n)
if len(a_scope) == 1:
a = L(1)
else:
a = a_scope.all_except(n)
if evals_to_numeric(a / denominator):
p.append(P(node, extract_nominator_term, (a, n)))
if len(n_scope) == 1 and len(d_scope) == 1:
return p
# a ^ b * c / (a ^ d * e)
for n, d in product(n_scope, d_scope):
if n == d:
handler = divide_fraction_by_term
elif is_power_combination(n, d):
handler = extract_fraction_terms
else:
continue # pragma: nocover
p.append(P(node, handler, (n_scope, d_scope, n, d)))
return p
def extract_nominator_term(root, args):
"""
ac / b and eval(c) not in Z and eval(a / b) in Z -> a / b * c
"""
a, c = args
return negate(a / root[1] * c, root.negated)
MESSAGES[extract_nominator_term] = \
_('Extract {2} from the nominator of fraction {0}.')
def extract_fraction_terms(root, args):
"""
a ^ b * c / (a ^ d * e) -> a ^ b / a ^ d * (c / e)
"""
n_scope, d_scope, n, d = args
div = n / d * (remove_from_mult_scope(n_scope, n) \
/ remove_from_mult_scope(d_scope, d))
return negate(div, root.negated)
MESSAGES[extract_fraction_terms] = _('Extract `{3} / {4}` from fraction {0}.')
def divide_fraction_by_term(root, args):
"""
ab / a -> b
a / (ba) -> 1 / b
a * c / (ae) -> c / e
"""
n_scope, d_scope, n, d = args
nom = remove_from_mult_scope(n_scope, n)
d_scope.remove(d)
if not len(d_scope):
return negate(nom, root.negated)
return negate(nom / d_scope.as_nary_node(), root.negated)
MESSAGES[divide_fraction_by_term] = \
_('Divide nominator and denominator of {0} by {2}.')
def match_division_in_denominator(node):
"""
a / (b / c + d) -> (ca) / (c(b / c + d))
"""
assert node.is_op(OP_DIV)
denom = node[1]
if not denom.is_op(OP_ADD):
return []
return [P(node, multiply_with_term, (n[1],))
for n in Scope(denom) if n.is_op(OP_DIV)]
def multiply_with_term(root, args):
"""
a / (b / c + d) -> (ca) / (c(b / c + d))
"""
c = args[0]
nom, denom = root
return negate(c * nom / (c * denom), root.negated)
MESSAGES[multiply_with_term] = \
_('Multiply nominator and denominator of {0} with {1}.')
def match_combine_fractions(node):
"""
a/b + c/d -> ad/(bd) + bc/(bd) # -> (ad + bc)/(bd)
"""
assert node.is_op(OP_ADD)
scope = Scope(node)
fractions = [n for n in scope if n.is_op(OP_DIV)]
p = []
for left, right in combinations(fractions, 2):
p.append(P(node, combine_fractions, (scope, left, right)))
return p
def combine_fractions(root, args):
"""
a/b + c/d -> ad/(bd) + bc/(bd)
"""
scope, ab, cd = args
(a, b), (c, d) = ab, cd
a = negate(a, ab.negated)
d = negate(d, cd.negated)
scope.replace(ab, a * d / (b * d) + b * c / (b * d))
scope.remove(cd)
return scope.as_nary_node()
MESSAGES[combine_fractions] = _('Combine fraction {2} and {3}.')
def match_remove_division_negation(node):
"""
-a / (-b + c) -> a / (--b - c)
"""
assert node.is_op(OP_DIV)
nom, denom = node
if node.negated:
if nom.is_op(OP_ADD) and any([n.negated for n in Scope(nom)]):
return [P(node, remove_division_negation, (True, nom))]
if denom.is_op(OP_ADD) and any([n.negated for n in Scope(denom)]):
return [P(node, remove_division_negation, (False, denom))]
return []
def remove_division_negation(root, args):
"""
-a / (-b + c) -> a / (--b - c)
"""
nom, denom = root
if args[0]:
nom = negate_polynome(nom, ())
else:
denom = negate_polynome(denom, ())
return negate(nom / denom, root.negated - 1)
MESSAGES[remove_division_negation] = \
_('Move negation from fraction {0} to polynome {2}.')
def match_fraction_in_division(node):
"""
(1 / a * b) / c -> b / (ac)
c / (1 / a * b) -> (ac) / b
"""
assert node.is_op(OP_DIV)
nom, denom = node
p = []
if nom.is_op(OP_MUL):
scope = Scope(nom)
for n in scope:
if n.is_op(OP_DIV) and n[0] == 1:
p.append(P(node, fraction_in_division, (True, scope, n)))
if denom.is_op(OP_MUL):
scope = Scope(denom)
for n in scope:
if n.is_op(OP_DIV) and n[0] == 1:
p.append(P(node, fraction_in_division, (False, scope, n)))
return p
def fraction_in_division(root, args):
"""
(1 / a * b) / c -> b / (ac)
c / (1 / a * b) -> (ac) / b
"""
is_nominator, scope, fraction = args
nom, denom = root
if fraction.negated or fraction[0].negated:
scope.replace(fraction, fraction[0].negate(fraction.negated))
else:
scope.remove(fraction)
if is_nominator:
nom = scope.as_nary_node()
denom = fraction[1] * denom
else:
nom = fraction[1] * nom
denom = scope.as_nary_node()
return negate(nom / denom, root.negated)
MESSAGES[fraction_in_division] = \
_('Multiply both sides of fraction {0} with {3[1]}.')
| agpl-3.0 | -6,291,606,106,984,690,000 | 23.96944 | 79 | 0.540899 | false |
dsysoev/fun-with-algorithms | queue/maxheap.py | 1 | 3185 |
"""
Max heap implementation
https://en.wikipedia.org/wiki/Min-max_heap
Algorithm Average
Build heap O(n)
"""
from __future__ import print_function
from math import log, ceil
class MaxHeap(object):
""" Binary Max heap implementation """
def __init__(self):
self.__data = []
def max_heapify(self, start):
""" function with which to save the properties of the heap """
left = self.left_child(start)
right = self.right_child(start)
size = self.heap_size()
if left < size and self.__data[left] > self.__data[start]:
largest = left
elif right < size:
largest = right
else:
return
if right < size and self.__data[right] > self.__data[largest]:
largest = right
if largest != start and self.__data[start] < self.__data[largest]:
self.__data[start], self.__data[largest] = self.__data[largest], self.__data[start]
self.max_heapify(largest)
def add_list(self, lst):
""" add list of elements into the heap """
self.__data += lst
for index in range(self.parent(self.heap_size() - 1), -1, -1):
self.max_heapify(index)
def add(self, value):
""" add one element into the heap """
self.add_list([value])
def extract_max(self):
""" return maximum element from the heap """
value = self.__data[0]
del self.__data[0]
for position in range(self.parent(self.heap_size() - 1), -1, -1):
self.max_heapify(position)
return value
def heap_size(self):
""" return number of elements in the heap """
return len(self.__data)
def parent(self, index):
""" return parent index """
return (index + 1) // 2 - 1
def left_child(self, index):
""" return index of left child """
return 2 * index + 1
def right_child(self, index):
""" return index of right child """
return 2 * index + 2
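    # Index arithmetic illustrated on the 0-based backing list
    # [16, 14, 10, 8, 7, 9, 3, 2, 4, 1]:
    #   parent(4)      -> (4 + 1) // 2 - 1 = 1   (7 sits below 14)
    #   left_child(1)  -> 2 * 1 + 1 = 3          (value 8)
    #   right_child(1) -> 2 * 1 + 2 = 4          (value 7)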
def __str__(self):
        # string length used to center each level of the tree
        strlen = 2 * 2 ** int(ceil(log(self.heap_size(), 2)))
maxlevel = int(log(self.heap_size(), 2)) + 1
# add root element to string
string = str([self.__data[0]]).center(strlen) + '\n'
for index in range(1, maxlevel):
# get list of elements for current level
lst = self.__data[2 ** index - 1:2 ** (index + 1) - 1]
if index == maxlevel - 1:
# without center for last line
string += str(lst) + '\n'
else:
string += str(lst).center(strlen) + '\n'
return string
if __name__ == "__main__":
HEAP = MaxHeap()
LIST = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]
print("Build heap from list: {}".format(LIST))
HEAP.add_list(LIST)
print("Show heap:\n{}".format(HEAP))
for VALUE in [100]:
print("Add new element {}".format(VALUE))
HEAP.add(VALUE)
print("Show heap:\n{}".format(HEAP))
for _ in range(2):
MAX = HEAP.extract_max()
print("Extract max element: {}".format(MAX))
print("Show heap:\n{}".format(HEAP))
| mit | 5,271,140,296,426,704,000 | 29.92233 | 95 | 0.539403 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/errors/types/billing_setup_error.py | 1 | 1902 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.errors',
marshal='google.ads.googleads.v7',
manifest={
'BillingSetupErrorEnum',
},
)
class BillingSetupErrorEnum(proto.Message):
r"""Container for enum describing possible billing setup errors. """
class BillingSetupError(proto.Enum):
r"""Enum describing possible billing setup errors."""
UNSPECIFIED = 0
UNKNOWN = 1
CANNOT_USE_EXISTING_AND_NEW_ACCOUNT = 2
CANNOT_REMOVE_STARTED_BILLING_SETUP = 3
CANNOT_CHANGE_BILLING_TO_SAME_PAYMENTS_ACCOUNT = 4
BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_STATUS = 5
INVALID_PAYMENTS_ACCOUNT = 6
BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_CATEGORY = 7
INVALID_START_TIME_TYPE = 8
THIRD_PARTY_ALREADY_HAS_BILLING = 9
BILLING_SETUP_IN_PROGRESS = 10
NO_SIGNUP_PERMISSION = 11
CHANGE_OF_BILL_TO_IN_PROGRESS = 12
PAYMENTS_PROFILE_NOT_FOUND = 13
PAYMENTS_ACCOUNT_NOT_FOUND = 14
PAYMENTS_PROFILE_INELIGIBLE = 15
PAYMENTS_ACCOUNT_INELIGIBLE = 16
CUSTOMER_NEEDS_INTERNAL_APPROVAL = 17
PAYMENTS_ACCOUNT_INELIGIBLE_CURRENCY_CODE_MISMATCH = 19
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 2,008,635,368,135,882,800 | 34.886792 | 75 | 0.685594 | false |
ojii/django-nani | hvad/tests/forms.py | 1 | 5295 | # -*- coding: utf-8 -*-
from django.core.exceptions import FieldError
from hvad.forms import TranslatableModelForm, TranslatableModelFormMetaclass
from hvad.test_utils.context_managers import LanguageOverride
from hvad.test_utils.testcase import NaniTestCase
from testproject.app.models import Normal
from django.db import models
class NormalForm(TranslatableModelForm):
class Meta:
model = Normal
fields = ['shared_field', 'translated_field']
class NormalMediaForm(TranslatableModelForm):
class Meta:
model = Normal
class Media:
css = {
'all': ('layout.css',)
}
class NormalFormExclude(TranslatableModelForm):
class Meta:
model = Normal
exclude = ['shared_field']
class FormTests(NaniTestCase):
def test_nontranslatablemodelform(self):
# Make sure that TranslatableModelForm won't accept a regular model
# "Fake" model to use for the TranslatableModelForm
class NonTranslatableModel(models.Model):
field = models.CharField(max_length=128)
# Meta class for use below
class Meta:
model = NonTranslatableModel
        # Make sure we do indeed get an exception if we try to initialise it
self.assertRaises(TypeError,
TranslatableModelFormMetaclass,
'NonTranslatableModelForm', (TranslatableModelForm,),
{'Meta': Meta}
)
def test_normal_model_form_instantiation(self):
# Basic example and checking it gives us all the fields needed
form = NormalForm()
self.assertTrue("translated_field" in form.fields)
self.assertTrue("shared_field" in form.fields)
self.assertTrue("translated_field" in form.base_fields)
self.assertTrue("shared_field" in form.base_fields)
self.assertFalse(form.is_valid())
# Check if it works with media argument too
form = NormalMediaForm()
self.assertFalse(form.is_valid())
self.assertTrue("layout.css" in str(form.media))
# Check if it works with an instance of Normal
form = NormalForm(instance=Normal())
self.assertFalse(form.is_valid())
def test_normal_model_form_valid(self):
SHARED = 'Shared'
TRANSLATED = 'English'
data = {
'shared_field': SHARED,
'translated_field': TRANSLATED,
'language_code': 'en'
}
form = NormalForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
self.assertTrue("translated_field" in form.fields)
self.assertTrue("shared_field" in form.fields)
self.assertTrue(TRANSLATED in form.clean()["translated_field"])
self.assertTrue(SHARED in form.clean()["shared_field"])
def test_normal_model_form_initaldata_instance(self):
        # Check if it accepts initial data and instance
SHARED = 'Shared'
TRANSLATED = 'English'
data = {
'shared_field': SHARED,
'translated_field': TRANSLATED,
'language_code': 'en'
}
form = NormalForm(data, instance=Normal(), initial=data)
self.assertTrue(form.is_valid(), form.errors.as_text())
def test_normal_model_form_existing_instance(self):
# Check if it works with an existing instance of Normal
SHARED = 'Shared'
TRANSLATED = 'English'
instance = Normal.objects.language("en").create(shared_field=SHARED, translated_field=TRANSLATED)
form = NormalForm(instance=instance)
self.assertFalse(form.is_valid())
self.assertTrue(SHARED in form.as_p())
self.assertTrue(TRANSLATED in form.as_p())
def test_normal_model_form_save(self):
with LanguageOverride('en'):
SHARED = 'Shared'
TRANSLATED = 'English'
data = {
'shared_field': SHARED,
'translated_field': TRANSLATED,
'language_code': 'en'
}
form = NormalForm(data)
            # For comparison, saving a non-translated ModelForm takes 7 queries.
with self.assertNumQueries(2):
obj = form.save()
with self.assertNumQueries(0):
self.assertEqual(obj.shared_field, SHARED)
self.assertEqual(obj.translated_field, TRANSLATED)
self.assertNotEqual(obj.pk, None)
def test_no_language_code_in_fields(self):
with LanguageOverride("en"):
form = NormalForm()
self.assertFalse(form.fields.has_key("language_code"))
form = NormalMediaForm()
self.assertFalse(form.fields.has_key("language_code"))
form = NormalFormExclude()
self.assertFalse(form.fields.has_key("language_code"))
def test_form_wrong_field_in_class(self):
with LanguageOverride("en"):
def create_wrong_form():
class WrongForm(TranslatableModelForm):
class Meta:
model = Normal
fields = ['a_field_that_doesnt_exist']
form = WrongForm()
self.assertRaises(FieldError, create_wrong_form)
| bsd-3-clause | -6,615,196,495,451,526,000 | 36.821429 | 105 | 0.605666 | false |
jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gr/tag_utils.py | 1 | 5013 | from __future__ import unicode_literals
import pmt
from . import gr_python as gr
class PythonTag(object):
" Python container for tags "
def __init__(self):
self.offset = None
self.key = None
self.value = None
self.srcid = False
def tag_to_python(tag):
""" Convert a stream tag to a Python-readable object """
newtag = PythonTag()
newtag.offset = tag.offset
newtag.key = pmt.to_python(tag.key)
newtag.value = pmt.to_python(tag.value)
newtag.srcid = pmt.to_python(tag.srcid)
return newtag
def tag_to_pmt(tag):
""" Convert a Python-readable object to a stream tag """
newtag = gr.tag_t()
newtag.offset = tag.offset
    newtag.key = pmt.from_python(tag.key)
newtag.value = pmt.from_python(tag.value)
newtag.srcid = pmt.from_python(tag.srcid)
return newtag
def python_to_tag(tag_struct):
"""
Convert a Python list/tuple/dictionary to a stream tag.
When using a list or tuple format, this function expects the format:
tag_struct[0] --> tag's offset (as an integer)
tag_struct[1] --> tag's key (as a PMT)
tag_struct[2] --> tag's value (as a PMT)
tag_struct[3] --> tag's srcid (as a PMT)
When using a dictionary, we specify the dictionary keys using:
tag_struct['offset'] --> tag's offset (as an integer)
tag_struct['key'] --> tag's key (as a PMT)
tag_struct['value'] --> tag's value (as a PMT)
tag_struct['srcid'] --> tag's srcid (as a PMT)
If the function can take the Python object and successfully
construct a tag, it will return the tag. Otherwise, it will return
None.
"""
good = False
tag = gr.tag_t()
if(type(tag_struct) == dict):
if('offset' in tag_struct):
if(isinstance(tag_struct['offset'], int)):
tag.offset = tag_struct['offset']
good = True
if('key' in tag_struct):
if(isinstance(tag_struct['key'], pmt.pmt_base)):
tag.key = tag_struct['key']
good = True
if('value' in tag_struct):
if(isinstance(tag_struct['value'], pmt.pmt_base)):
tag.value = tag_struct['value']
good = True
if('srcid' in tag_struct):
if(isinstance(tag_struct['srcid'], pmt.pmt_base)):
tag.srcid = tag_struct['srcid']
good = True
elif(type(tag_struct) == list or type(tag_struct) == tuple):
if(len(tag_struct) == 4):
if(isinstance(tag_struct[0], int)):
tag.offset = tag_struct[0]
good = True
if(isinstance(tag_struct[1], pmt.pmt_base)):
tag.key = tag_struct[1]
good = True
if(isinstance(tag_struct[2], pmt.pmt_base)):
tag.value = tag_struct[2]
good = True
if(isinstance(tag_struct[3], pmt.pmt_base)):
tag.srcid = tag_struct[3]
good = True
elif(len(tag_struct) == 3):
if(isinstance(tag_struct[0], int)):
tag.offset = tag_struct[0]
good = True
if(isinstance(tag_struct[1], pmt.pmt_base)):
tag.key = tag_struct[1]
good = True
if(isinstance(tag_struct[2], pmt.pmt_base)):
tag.value = tag_struct[2]
good = True
tag.srcid = pmt.PMT_F
if(good):
return tag
else:
return None
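# Minimal usage sketch for python_to_tag() using the dictionary form described
# in the docstring above (key/value payloads are arbitrary examples):
#
#   tag = python_to_tag({'offset': 42,
#                        'key': pmt.intern('freq'),
#                        'value': pmt.from_double(2.4e9),
#                        'srcid': pmt.intern('usrp0')})
#
# If none of the entries can be parsed the function returns None.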
def tag_t_offset_compare_key():
"""
Convert a tag_t.offset_compare function into a key=function
This method is modeled after functools.cmp_to_key(_func_).
It can be used by functions that accept a key function, such as
sorted(), min(), max(), etc. to compare tags by their offsets,
e.g., sorted(tag_list, key=gr.tag_t.offset_compare_key()).
"""
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
# x.offset < y.offset
return gr.tag_t.offset_compare(self.obj, other.obj)
def __gt__(self, other):
# y.offset < x.offset
return gr.tag_t.offset_compare(other.obj, self.obj)
def __eq__(self, other):
# not (x.offset < y.offset) and not (y.offset < x.offset)
return not gr.tag_t.offset_compare(self.obj, other.obj) and \
not gr.tag_t.offset_compare(other.obj, self.obj)
def __le__(self, other):
# not (y.offset < x.offset)
return not gr.tag_t.offset_compare(other.obj, self.obj)
def __ge__(self, other):
# not (x.offset < y.offset)
return not gr.tag_t.offset_compare(self.obj, other.obj)
def __ne__(self, other):
# (x.offset < y.offset) or (y.offset < x.offset)
return gr.tag_t.offset_compare(self.obj, other.obj) or \
gr.tag_t.offset_compare(other.obj, self.obj)
return K
| gpl-3.0 | 2,668,344,318,893,686,300 | 33.8125 | 73 | 0.549172 | false |
Workday/OpenFrame | native_client_sdk/src/build_tools/build_sdk.py | 1 | 36370 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
NACLPORTS_URL = 'https://chromium.googlesource.com/external/naclports.git'
NACLPORTS_REV = '65c71c1524a74ff8415573e5e5ef7c59ce4ac437'
GYPBUILD_DIR = 'gypbuild'
options = None
# Map of: ToolchainName: (PackageName, SDKDir, arch).
TOOLCHAIN_PACKAGE_MAP = {
'arm_glibc': ('nacl_arm_glibc', '%(platform)s_arm_glibc', 'arm'),
'x86_glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc', 'x86'),
'pnacl': ('pnacl_newlib', '%(platform)s_pnacl', 'pnacl')
}
def GetToolchainDirName(tcname):
"""Return the directory name for a given toolchain"""
return TOOLCHAIN_PACKAGE_MAP[tcname][1] % {'platform': getos.GetPlatform()}
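# For example, on a Linux host GetToolchainDirName('x86_glibc') substitutes the
# '%(platform)s' placeholder and returns 'linux_x86_glibc', the directory name
# used under <pepperdir>/toolchain.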
def GetToolchainDir(pepperdir, tcname):
"""Return the full path to a given toolchain within a given sdk root"""
return os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
def GetToolchainLibc(tcname):
if tcname == 'pnacl':
return 'newlib'
for libc in ('glibc', 'newlib', 'host'):
if libc in tcname:
return libc
def GetToolchainNaClInclude(pepperdir, tcname, arch=None):
tcpath = GetToolchainDir(pepperdir, tcname)
if arch is None:
arch = TOOLCHAIN_PACKAGE_MAP[tcname][2]
if arch == 'x86':
return os.path.join(tcpath, 'x86_64-nacl', 'include')
elif arch == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'include')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'include')
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetConfigDir(arch):
if arch.endswith('x64') and getos.GetPlatform() == 'win':
return 'Release_x64'
else:
return 'Release'
def GetNinjaOutDir(arch):
return os.path.join(OUT_DIR, GYPBUILD_DIR + '-' + arch, GetConfigDir(arch))
def GetGypBuiltLib(tcname, arch):
if arch == 'ia32':
lib_suffix = '32'
elif arch == 'x64':
lib_suffix = '64'
elif arch == 'arm':
lib_suffix = 'arm'
else:
lib_suffix = ''
tcdir = 'tc_' + GetToolchainLibc(tcname)
if tcname == 'pnacl':
if arch is None:
lib_suffix = ''
tcdir = 'tc_pnacl_newlib'
arch = 'x64'
else:
arch = 'clang-' + arch
return os.path.join(GetNinjaOutDir(arch), 'gen', tcdir, 'lib' + lib_suffix)
def GetToolchainNaClLib(tcname, tcpath, arch):
if arch == 'ia32':
return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
elif arch == 'x64':
return os.path.join(tcpath, 'x86_64-nacl', 'lib')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'lib')
elif tcname == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'lib')
def GetOutputToolchainLib(pepperdir, tcname, arch):
tcpath = os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
return GetToolchainNaClLib(tcname, tcpath, arch)
def GetPNaClTranslatorLib(tcpath, arch):
if arch not in ['arm', 'x86-32', 'x86-64']:
buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
return os.path.join(tcpath, 'translator', arch, 'lib')
def BuildStepDownloadToolchains(toolchains):
buildbot_common.BuildStep('Running package_version.py')
args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
args.extend(['sync', '--extract'])
buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
buildbot_common.BuildStep('Clean Pepper Dirs')
dirs_to_remove = (
pepperdir,
pepperdir_old,
os.path.join(OUT_DIR, 'arm_trusted')
)
for dirname in dirs_to_remove:
if os.path.exists(dirname):
buildbot_common.RemoveDir(dirname)
buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
for subdir in subdirs:
buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
TEXT_FILES = [
'AUTHORS',
'COPYING',
'LICENSE',
'README.Makefiles',
'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
nacl_revision):
buildbot_common.BuildStep('Add Text Files')
InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
# Replace a few placeholders in README
readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
readme_text = readme_text.replace('${VERSION}', pepper_ver)
readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
build_version.ChromeCommitPosition())
readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
# Year/Month/Day Hour:Minute:Second
time_format = '%Y/%m/%d %H:%M:%S'
readme_text = readme_text.replace('${DATE}',
datetime.datetime.now().strftime(time_format))
open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('Untar Toolchains')
platform = getos.GetPlatform()
build_platform = '%s_x86' % platform
tmpdir = os.path.join(OUT_DIR, 'tc_temp')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
# Create a list of extract packages tuples, the first part should be
# "$PACKAGE_TARGET/$PACKAGE". The second part should be the destination
# directory relative to pepperdir/toolchain.
extract_packages = []
for toolchain in toolchains:
toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
if toolchain_map:
package_name, tcdir, _ = toolchain_map
package_tuple = (os.path.join(build_platform, package_name),
tcdir % {'platform': platform})
extract_packages.append(package_tuple)
# On linux we also want to extract the arm_trusted package which contains
# the ARM libraries we ship in support of sel_ldr_arm.
if platform == 'linux':
extract_packages.append((os.path.join(build_platform, 'arm_trusted'),
'arm_trusted'))
if extract_packages:
# Extract all of the packages into the temp directory.
package_names = [package_tuple[0] for package_tuple in extract_packages]
buildbot_common.Run([sys.executable, PKGVER,
'--packages', ','.join(package_names),
'--tar-dir', NACL_TOOLCHAINTARS_DIR,
'--dest-dir', tmpdir,
'extract'])
# Move all the packages we extracted to the correct destination.
for package_name, dest_dir in extract_packages:
full_src_dir = os.path.join(tmpdir, package_name)
full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
buildbot_common.Move(full_src_dir, full_dst_dir)
# Cleanup the temporary directory we are no longer using.
buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
NACL_HEADER_MAP = {
'newlib': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/pthread/pthread.h', ''),
('native_client/src/untrusted/pthread/semaphore.h', ''),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
'glibc': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
}
def InstallFiles(src_root, dest_root, file_list):
"""Copy a set of files from src_root to dest_root according
  to the given mapping. This allows files to be copied to a location
  in the destination tree that is different from their location in the
  source tree.
  If the destination mapping ends with a '/' then the destination
  basename is inherited from the source file.
Wildcards can be used in the source list but it is not recommended
as this can end up adding things to the SDK unintentionally.
"""
for file_spec in file_list:
# The list of files to install can be a simple list of
# strings or a list of pairs, where each pair corresponds
# to a mapping from source to destination names.
if type(file_spec) == str:
src_file = dest_file = file_spec
else:
src_file, dest_file = file_spec
src_file = os.path.join(src_root, src_file)
# Expand sources files using glob.
sources = glob.glob(src_file)
if not sources:
sources = [src_file]
if len(sources) > 1 and not dest_file.endswith('/'):
buildbot_common.ErrorExit("Target file must end in '/' when "
"using globbing to install multiple files")
for source in sources:
if dest_file.endswith('/'):
dest = os.path.join(dest_file, os.path.basename(source))
else:
dest = dest_file
dest = os.path.join(dest_root, dest)
if not os.path.isdir(os.path.dirname(dest)):
buildbot_common.MakeDir(os.path.dirname(dest))
buildbot_common.CopyFile(source, dest)
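# Illustrative file_list for InstallFiles(), mixing the two supported entry
# forms (plain string vs. (source, destination) pair); the paths are made up:
#
#   InstallFiles(SRC_DIR, out_dir, [
#       'README',                              # installed as out_dir/README
#       ('docs/guide.txt', 'documentation/'),  # keeps its basename
#       ('tools/run.sh', 'bin/run'),           # renamed on install
#   ])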
def InstallNaClHeaders(tc_dst_inc, tcname):
"""Copies NaCl headers to expected locations in the toolchain."""
InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[GetToolchainLibc(tcname)])
def MakeNinjaRelPath(path):
return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), path)
# TODO(ncbray): stop building and copying libraries into the SDK that are
# already provided by the toolchain.
# Mapping from libc name to the gyp-built libraries to copy into the toolchain
TOOLCHAIN_LIBS = {
'newlib' : [
'libminidump_generator.a',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_exception.a',
'libnacl_list_mappings.a',
'libnosys.a',
'libppapi.a',
'libppapi_stub.a',
'libpthread.a',
],
'glibc': [
'libminidump_generator.a',
'libminidump_generator.so',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_dyncode.so',
'libnacl_exception.a',
'libnacl_exception.so',
'libnacl_list_mappings.a',
'libnacl_list_mappings.so',
'libppapi.a',
'libppapi.so',
'libppapi_stub.a',
]
}
def GypNinjaInstall(pepperdir, toolchains):
tools_files_32 = [
['sel_ldr', 'sel_ldr_x86_32'],
['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
]
arm_files = [
['elf_loader_newlib_arm.nexe', 'elf_loader_arm.nexe'],
]
tools_files_64 = []
platform = getos.GetPlatform()
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if platform != 'win':
tools_files_64 += [
['dump_syms', 'dump_syms'],
['minidump_dump', 'minidump_dump'],
['minidump_stackwalk', 'minidump_stackwalk']
]
tools_files_64.append(['sel_ldr', 'sel_ldr_x86_64'])
tools_files_64.append(['ncval_new', 'ncval'])
if platform == 'linux':
tools_files_32.append(['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_32'])
tools_files_64.append(['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_64'])
tools_files_32.append(['nonsfi_loader_newlib_x32_nonsfi.nexe',
'nonsfi_loader_x86_32'])
tools_dir = os.path.join(pepperdir, 'tools')
buildbot_common.MakeDir(tools_dir)
# Add .exe extensions to all windows tools
for pair in tools_files_32 + tools_files_64:
if platform == 'win' and not pair[0].endswith('.nexe'):
pair[0] += '.exe'
pair[1] += '.exe'
# Add ARM binaries
if platform == 'linux' and not options.no_arm_trusted:
arm_files += [
['irt_core_newlib_arm.nexe', 'irt_core_arm.nexe'],
['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
['nonsfi_loader_newlib_arm_nonsfi.nexe', 'nonsfi_loader_arm'],
['sel_ldr', 'sel_ldr_arm']
]
InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_64)
InstallFiles(GetNinjaOutDir('ia32'), tools_dir, tools_files_32)
InstallFiles(GetNinjaOutDir('arm'), tools_dir, arm_files)
for tc in toolchains:
if tc in ('host', 'clang-newlib'):
continue
elif tc == 'pnacl':
xarches = (None, 'ia32', 'x64', 'arm')
elif tc in ('x86_glibc', 'x86_newlib'):
xarches = ('ia32', 'x64')
elif tc == 'arm_glibc':
xarches = ('arm',)
else:
raise AssertionError('unexpected toolchain value: %s' % tc)
for xarch in xarches:
src_dir = GetGypBuiltLib(tc, xarch)
dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
libc = GetToolchainLibc(tc)
InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[libc])
def GypNinjaBuild_NaCl(rel_out_dir):
gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')
out_dir_32 = MakeNinjaRelPath(rel_out_dir + '-ia32')
out_dir_64 = MakeNinjaRelPath(rel_out_dir + '-x64')
out_dir_arm = MakeNinjaRelPath(rel_out_dir + '-arm')
out_dir_clang_32 = MakeNinjaRelPath(rel_out_dir + '-clang-ia32')
out_dir_clang_64 = MakeNinjaRelPath(rel_out_dir + '-clang-x64')
out_dir_clang_arm = MakeNinjaRelPath(rel_out_dir + '-clang-arm')
GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_32,
gyp_defines=['use_nacl_clang=0'])
GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_64,
gyp_defines=['use_nacl_clang=0'])
GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_arm,
gyp_defines=['use_nacl_clang=0'])
GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
out_dir_clang_32, gyp_defines=['use_nacl_clang=1'])
GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
out_dir_clang_64, gyp_defines=['use_nacl_clang=1'])
GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
out_dir_clang_arm, gyp_defines=['use_nacl_clang=1'])
GypNinjaBuild('x64', gyp_py, all_gyp, 'ncval_new', out_dir_64)
def GypNinjaBuild_Breakpad(rel_out_dir):
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if getos.GetPlatform() == 'win':
return
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp')
build_list = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
GypNinjaBuild('x64', gyp_py, gyp_file, build_list, out_dir)
def GypNinjaBuild_PPAPI(arch, rel_out_dir, gyp_defines=None):
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client',
'native_client.gyp')
GypNinjaBuild(arch, gyp_py, gyp_file, 'ppapi_lib', out_dir,
gyp_defines=gyp_defines)
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
# TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
# Chromium build, and once here. When we move more of the SDK build process
# to gyp, we can remove this.
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
targets = ['aot']
GypNinjaBuild(target_arch, gyp_py, gyp_file, targets, out_dir)
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
out_dir, gyp_defines=None):
gyp_env = dict(os.environ)
gyp_env['GYP_GENERATORS'] = 'ninja'
gyp_defines = gyp_defines or []
gyp_defines.append('nacl_allow_thin_archives=0')
if not options.no_use_sysroot:
gyp_defines.append('use_sysroot=1')
if options.mac_sdk:
gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
if arch is not None:
gyp_defines.append('target_arch=%s' % arch)
if arch == 'arm':
gyp_env['GYP_CROSSCOMPILE'] = '1'
if options.no_arm_trusted:
gyp_defines.append('disable_cross_trusted=1')
if getos.GetPlatform() == 'mac':
gyp_defines.append('clang=1')
gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
# We can't use windows path separators in GYP_GENERATOR_FLAGS since
# gyp uses shlex to parse them and treats '\' as an escape char.
gyp_env['GYP_GENERATOR_FLAGS'] = 'output_dir=%s' % out_dir.replace('\\', '/')
# Print relevant environment variables
for key, value in gyp_env.iteritems():
if key.startswith('GYP') or key in ('CC',):
print ' %s="%s"' % (key, value)
buildbot_common.Run(
[sys.executable, gyp_py_script, gyp_file, '--depth=.'],
cwd=SRC_DIR,
env=gyp_env)
NinjaBuild(targets, out_dir, arch)
def NinjaBuild(targets, out_dir, arch):
if type(targets) is not list:
targets = [targets]
out_config_dir = os.path.join(out_dir, GetConfigDir(arch))
buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
buildbot_common.BuildStep('SDK Items')
if clean:
for dirname in glob.glob(os.path.join(OUT_DIR, GYPBUILD_DIR + '*')):
buildbot_common.RemoveDir(dirname)
build = True
if build:
GypNinjaBuild_NaCl(GYPBUILD_DIR)
GypNinjaBuild_Breakpad(GYPBUILD_DIR + '-x64')
if set(toolchains) & set(['x86_glibc', 'x86_newlib']):
GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-ia32',
['use_nacl_clang=0'])
GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-x64',
['use_nacl_clang=0'])
if 'arm_glibc' in toolchains:
GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm',
['use_nacl_clang=0'] )
if 'pnacl' in toolchains:
GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-clang-ia32',
['use_nacl_clang=1'])
GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-clang-x64',
['use_nacl_clang=1'])
GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-clang-arm',
['use_nacl_clang=1'])
# NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
for arch in ('ia32', 'arm'):
# Fill in the latest native pnacl shim library from the chrome build.
build_dir = GYPBUILD_DIR + '-pnacl-' + arch
GypNinjaBuild_Pnacl(build_dir, arch)
GypNinjaInstall(pepperdir, toolchains)
for toolchain in toolchains:
if toolchain not in ('host', 'clang-newlib'):
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, toolchain),
toolchain)
if 'pnacl' in toolchains:
# NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
for arch in ('ia32', 'arm'):
# Fill in the latest native pnacl shim library from the chrome build.
build_dir = GYPBUILD_DIR + '-pnacl-' + arch
if arch == 'ia32':
nacl_arches = ['x86-32', 'x86-64']
elif arch == 'arm':
nacl_arches = ['arm']
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
for nacl_arch in nacl_arches:
release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
'gen', 'tc_pnacl_translate',
'lib-' + nacl_arch)
pnacldir = GetToolchainDir(pepperdir, 'pnacl')
pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
if not os.path.isdir(pnacl_translator_lib_dir):
buildbot_common.ErrorExit('Expected %s directory to exist.' %
pnacl_translator_lib_dir)
buildbot_common.CopyFile(
os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
pnacl_translator_lib_dir)
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'x86'),
'pnacl')
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'arm'),
'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
dirpath = os.path.join(pepperdir, dirname)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
return dirpath
def BuildStepUpdateHelpers(pepperdir, clobber):
buildbot_common.BuildStep('Update project helpers')
build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
build_experimental, clobber):
buildbot_common.BuildStep('Update examples and libraries')
filters = {}
if not build_experimental:
filters['EXPERIMENTAL'] = False
dsc_toolchains = []
for t in toolchains:
if t.startswith('x86_') or t.startswith('arm_'):
if t[4:] not in dsc_toolchains:
dsc_toolchains.append(t[4:])
elif t == 'host':
dsc_toolchains.append(getos.GetPlatform())
else:
dsc_toolchains.append(t)
filters['TOOLS'] = dsc_toolchains
# Update examples and libraries
filters['DEST'] = [
'getting_started',
'examples/api',
'examples/demo',
'examples/tutorial',
'src'
]
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
toolchains=dsc_toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
deps=True, clean=False, config='Debug', args=None):
buildbot_common.BuildStep(step_name)
build_projects.BuildProjectsBranch(pepperdir, directory, clean,
deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Debug',
clean=True, config='Debug')
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Release',
clean=True, config='Release')
# Cleanup .pyc file generated while building libraries. Without
# this we would end up shipping the pyc in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
buildbot_common.BuildStep('Verify SDK Files')
file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
try:
print 'SDK directory: %s' % pepperdir
verify_filelist.Verify(file_list_path, pepperdir)
print 'OK'
except verify_filelist.ParseException, e:
buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
except verify_filelist.VerifyException, e:
file_list_rel = os.path.relpath(file_list_path)
verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
verify_filelist_py = os.path.relpath(verify_filelist_py)
pepperdir_rel = os.path.relpath(pepperdir)
msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
pepperdir_rel)
buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
buildbot_common.BuildStep('Tar Pepper Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
archive_url):
with open(tarfile, 'rb') as tarfile_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
tarfile_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = archive_url
archive.size = archive_size
archive.checksum = archive_sha1
bundle = manifest_util.Bundle('pepper_' + pepper_ver)
bundle.revision = int(chrome_revision)
bundle.repath = 'pepper_' + pepper_ver
bundle.version = int(pepper_ver)
bundle.description = (
'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
pepper_ver, chrome_revision, nacl_revision))
bundle.stability = 'dev'
bundle.recommended = 'no'
bundle.archives = [archive]
return bundle
def Archive(filename, from_directory, step_link=True):
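  # Upload a file to the nacl_sdk area of Cloud Storage for the current
  # Chrome version; SDK builders publish to the release mirror, other bots
  # to the test bucket.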
if buildbot_common.IsSDKBuilder():
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/'
else:
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk_test/'
bucket_path += build_version.ChromeVersion()
buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
tarfile):
buildbot_common.BuildStep('Archive %s' % name)
tarname = os.path.basename(tarfile)
tarfile_dir = os.path.dirname(tarfile)
Archive(tarname, tarfile_dir)
# generate "manifest snippet" for this archive.
archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
build_version.ChromeVersion(), tarname)
bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
tarfile, archive_url)
manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
manifest_snippet_stream.write(bundle.GetDataAsString())
Archive(tarname + '.json', OUT_DIR, step_link=False)
def BuildStepBuildPNaClComponent(version, revision):
  # Sadly revision can go backwards for a given version since when a version
  # is built from master, revision will be a huge number (in the hundreds of
  # thousands). Once the branch happens the revision will reset to zero.
# TODO(sbc): figure out how to compensate for this in some way such that
# revisions always go forward for a given version.
buildbot_common.BuildStep('PNaCl Component')
# Version numbers must follow the format specified in:
# https://developer.chrome.com/extensions/manifest/version
# So ensure that rev_major/rev_minor don't overflow and ensure there
# are no leading zeros.
if len(revision) > 4:
rev_minor = int(revision[-4:])
rev_major = int(revision[:-4])
version = "0.%s.%s.%s" % (version, rev_major, rev_minor)
else:
version = "0.%s.0.%s" % (version, revision)
buildbot_common.Run(['./make_pnacl_component.sh',
'pnacl_multicrx_%s.zip' % revision,
version], cwd=SCRIPT_DIR)
def BuildStepArchivePNaClComponent(revision):
buildbot_common.BuildStep('Archive PNaCl Component')
Archive('pnacl_multicrx_%s.zip' % revision, OUT_DIR)
def BuildStepArchiveSDKTools():
buildbot_common.BuildStep('Build SDK Tools')
build_updater.BuildUpdater(OUT_DIR)
buildbot_common.BuildStep('Archive SDK Tools')
Archive('sdk_tools.tgz', OUT_DIR, step_link=False)
Archive('nacl_sdk.zip', OUT_DIR, step_link=False)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
"""Build the projects found in src/gonacl_appengine/src"""
buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['NACLPORTS_NO_ANNOTATE'] = "1"
buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--qemu', help='Add qemu for ARM.',
action='store_true')
parser.add_argument('--tar', help='Force the tar step.',
action='store_true')
parser.add_argument('--archive', help='Force the archive step.',
action='store_true')
parser.add_argument('--release', help='PPAPI release version.',
dest='release', default=None)
parser.add_argument('--build-app-engine',
help='Build AppEngine demos.', action='store_true')
parser.add_argument('--experimental',
help='build experimental examples and libraries', action='store_true',
dest='build_experimental')
parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
action='store_true')
parser.add_argument('--no-clean', dest='clean', action='store_false',
help="Don't clean gypbuild directories")
parser.add_argument('--mac-sdk',
help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
parser.add_argument('--no-arm-trusted', action='store_true',
help='Disable building of ARM trusted components (sel_ldr, etc).')
parser.add_argument('--no-use-sysroot', action='store_true',
help='Disable building against sysroot.')
  # To set up bash completion for this command, first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
global options
options = parser.parse_args(args)
buildbot_common.BuildStep('build_sdk')
if buildbot_common.IsSDKBuilder():
options.archive = True
# TODO(binji): re-enable app_engine build when the linux builder stops
# breaking when trying to git clone from github.
# See http://crbug.com/412969.
options.build_app_engine = False
options.tar = True
# NOTE: order matters here. This will be the order that is specified in the
# Makefiles; the first toolchain will be the default.
toolchains = ['pnacl', 'x86_glibc', 'arm_glibc', 'clang-newlib', 'host']
print 'Building: ' + ' '.join(toolchains)
platform = getos.GetPlatform()
if options.archive and not options.tar:
parser.error('Incompatible arguments with archive.')
chrome_version = int(build_version.ChromeMajorVersion())
chrome_revision = build_version.ChromeRevision()
nacl_revision = build_version.NaClRevision()
pepper_ver = str(chrome_version)
pepper_old = str(chrome_version - 1)
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
tarname = 'naclsdk_%s.tar.bz2' % platform
tarfile = os.path.join(OUT_DIR, tarname)
if options.release:
pepper_ver = options.release
print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
if 'NACL_SDK_ROOT' in os.environ:
    # We don't want the currently configured NACL_SDK_ROOT to have any effect
    # on the build.
del os.environ['NACL_SDK_ROOT']
if platform == 'linux':
# Linux-only: make sure the debian/stable sysroot image is installed
install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
'install-sysroot.py')
buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])
if not options.skip_toolchain:
BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
BuildStepDownloadToolchains(toolchains)
BuildStepUntarToolchains(pepperdir, toolchains)
if platform == 'linux':
buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
os.path.join(OUT_DIR, 'arm_trusted'))
if platform == 'linux':
# Linux-only: Copy arm libraries from the arm_trusted package. These are
# needed to be able to run sel_ldr_arm under qemu.
arm_libs = [
'lib/arm-linux-gnueabihf/librt.so.1',
'lib/arm-linux-gnueabihf/libpthread.so.0',
'lib/arm-linux-gnueabihf/libgcc_s.so.1',
'lib/arm-linux-gnueabihf/libc.so.6',
'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
'lib/arm-linux-gnueabihf/libm.so.6',
'usr/lib/arm-linux-gnueabihf/libstdc++.so.6'
]
arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
buildbot_common.MakeDir(arm_lib_dir)
for arm_lib in arm_libs:
arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
buildbot_common.CopyFile(arm_lib, arm_lib_dir)
buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
os.path.join(pepperdir, 'tools'))
BuildStepBuildToolchains(pepperdir, toolchains,
not options.skip_toolchain,
options.clean)
BuildStepUpdateHelpers(pepperdir, True)
BuildStepUpdateUserProjects(pepperdir, toolchains,
options.build_experimental, True)
BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
# Ship with libraries prebuilt, so run that first.
BuildStepBuildLibraries(pepperdir, 'src')
GenerateNotice(pepperdir)
# Verify the SDK contains what we expect.
BuildStepVerifyFilelist(pepperdir)
if options.tar:
BuildStepTarBundle(pepper_ver, tarfile)
if platform == 'linux':
BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)
if options.build_app_engine and platform == 'linux':
BuildStepBuildAppEngine(pepperdir, chrome_revision)
if options.qemu:
qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
oshelpers.Copy(['-r', qemudir, pepperdir])
# Archive the results on Google Cloud Storage.
if options.archive:
BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
tarfile)
# Only archive sdk_tools/naclport/pnacl_component on linux.
if platform == 'linux':
BuildStepArchiveSDKTools()
BuildStepArchivePNaClComponent(chrome_revision)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
buildbot_common.ErrorExit('build_sdk: interrupted')
| bsd-3-clause | 1,889,471,415,100,197,600 | 34.797244 | 79 | 0.664889 | false |
tuos/FlowAndCorrelations | mc/step2/src/RECOHI_mc.py | 1 | 4615 | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3 --conditions auto:run2_mc_hi -s RAW2DIGI,L1Reco,RECO -n 4 --eventcontent RECODEBUG --runUnscheduled --scenario HeavyIons --datatier GEN-SIM-RECO --beamspot NominalHICollision2015 --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1_HI,RecoHI/Configuration/customise_RecoMergedTrackCollection.customiseAddMergedTrackCollection --io RECOHI_mc.io --python RECOHI_mc.py --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('RECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(4)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#'/store/user/tuos/EPOSLHC/crab/Nov2017PbPb5020GeV/200kv1/MinBias/CRAB3_EPOSLHC_Nov2017_PbPb5020GeV_accre200kv1/171129_030608/0000/ReggeGribovPartonMC_EposLHC_5020GeV_PbPb_cfi_py_GEN_SIM_11.root'
#'file:/home/tuos/step2_DIGI_L1_DIGI2RAW_HLT_RAW2DIGI_L1Reco_1000_1_X6L.root'
'file:step2_DIGI_L1_DIGI2RAW_HLT_RAW2DIGI_L1Reco.root'
),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
allowUnscheduled = cms.untracked.bool(True)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step3 nevts:4'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RECODEBUGoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM-RECO'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('step3_RAW2DIGI_L1Reco_RECO.root'),
outputCommands = process.RECODEBUGEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_hi', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstructionHeavyIons)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECODEBUGoutput_step = cms.EndPath(process.RECODEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.RECODEBUGoutput_step)
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1_HI
#call to customisation function customisePostLS1_HI imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1_HI(process)
# Automatic addition of the customisation function from RecoHI.Configuration.customise_RecoMergedTrackCollection
from RecoHI.Configuration.customise_RecoMergedTrackCollection import customiseAddMergedTrackCollection
#call to customisation function customiseAddMergedTrackCollection imported from RecoHI.Configuration.customise_RecoMergedTrackCollection
process = customiseAddMergedTrackCollection(process)
# End of customisation functions
#do not add changes to your config after this point (unless you know what you are doing)
from FWCore.ParameterSet.Utilities import convertToUnscheduled
process=convertToUnscheduled(process)
from FWCore.ParameterSet.Utilities import cleanUnscheduled
process=cleanUnscheduled(process)
| mit | -738,442,596,770,843,600 | 46.091837 | 438 | 0.817551 | false |
moodpulse/l2 | refprocessor/processor.py | 1 | 1703 | from typing import Tuple, Union, List
from appconf.manager import SettingManager
from refprocessor.age_parser import AgeRight
from refprocessor.common import ValueRange, RANGE_IN
from refprocessor.result_parser import ResultRight
class RefProcessor:
def __init__(self, ref: dict, age: List[int]):
actual_key, actual_ref, actual_raw_ref = RefProcessor.get_actual_ref(ref, age)
self.key = actual_key
self.ref = actual_ref
self.raw_ref = actual_raw_ref
@staticmethod
def get_actual_ref(ref: dict, age: List[int]) -> Union[Tuple[str, ResultRight, str], Tuple[None, None, None]]:
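        # Return (key, parsed ResultRight, raw ref string) for the first age
        # range in the reference dict that matches the given age; otherwise
        # (None, None, None).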
for k in ref:
age_rights = AgeRight(k)
if age_rights.test(age):
return k, ResultRight(ref[k]), ref[k]
return None, None, None
def get_active_ref(self, raw_ref=True, single=False):
if raw_ref:
if single:
show_only_needed_ref = SettingManager.get("show_only_needed_ref", default='True', default_type='b')
if not show_only_needed_ref or not self.raw_ref:
return None
show_full_needed_ref = SettingManager.get("show_full_needed_ref", default='False', default_type='b')
if show_full_needed_ref:
return {self.key: self.raw_ref}
return {'Все': self.raw_ref}
return self.raw_ref
if isinstance(self.ref, ResultRight):
return self.ref
return ValueRange((0, ")"), (0, ")"))
def calc(self, value):
if isinstance(self.ref, ResultRight):
return self.ref.test(value)
return ResultRight.RESULT_MODE_NORMAL, RANGE_IN
| mit | 6,272,185,253,824,169,000 | 36.777778 | 116 | 0.606471 | false |
TacticalGoat/reddit | AutoContributor/autocontributor.py | 1 | 2306 | #/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#The subreddit you are acting on.
SUBJECTLINE = ['submission']
#If the modmail subject line contains one of these keywords, the sender will be added as a contributor
MAXPOSTS = 100
#The number of modmails to collect at once. 100 can be fetched with a single request
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
print('Loaded Users table')
sql.commit()
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
WAITS = str(WAIT)
print('Logging in.')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def scanmessages():
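    #Check recent modmail; when a subject matches one of the SUBJECTLINE
    #keywords, approve the sender as a contributor and log the message ID
    #so it is only processed once.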
print('Getting ' + SUBREDDIT + ' modmail')
subreddit = r.get_subreddit(SUBREDDIT)
modmail = list(subreddit.get_mod_mail(limit=MAXPOSTS))
for message in modmail:
cur.execute('SELECT * FROM oldposts WHERE ID=?', [message.fullname])
if not cur.fetchone():
print(message.fullname)
try:
mauthor = message.author.name
msubject = message.subject.lower()
if any(keyword.lower() in msubject for keyword in SUBJECTLINE):
print('\tApproving ' + mauthor)
subreddit.add_contributor(mauthor)
message.mark_as_read()
except AttributeError:
print('Failed to fetch username')
cur.execute('INSERT INTO oldposts VALUES(?)', [message.fullname])
sql.commit()
while True:
try:
scanmessages()
except Exception as e:
print('ERROR: ' + str(e))
sql.commit()
print('Running again in ' + WAITS + ' seconds \n_________\n')
time.sleep(WAIT)
| mit | -7,222,148,781,160,823,000 | 28.948052 | 104 | 0.652645 | false |
mganeva/mantid | Framework/PythonInterface/test/python/plugins/algorithms/CreateWorkspaceTest.py | 1 | 4168 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.api import MatrixWorkspace, AnalysisDataService
from mantid.simpleapi import CreateWorkspace
from testhelpers import run_algorithm
import numpy as np
class CreateWorkspaceTest(unittest.TestCase):
def test_create_with_1D_numpy_array(self):
x = np.array([1.,2.,3.,4.])
y = np.array([1.,2.,3.])
e = np.sqrt(np.array([1.,2.,3.]))
wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')
self.assertTrue(isinstance(wksp, MatrixWorkspace))
self.assertEquals(wksp.getNumberHistograms(), 1)
self.assertEquals(len(wksp.readY(0)), len(y))
self.assertEquals(len(wksp.readX(0)), len(x))
self.assertEquals(len(wksp.readE(0)), len(e))
for index in range(len(y)):
self.assertEquals(wksp.readY(0)[index], y[index])
self.assertEquals(wksp.readE(0)[index], e[index])
self.assertEquals(wksp.readX(0)[index], x[index])
# Last X value
self.assertEquals(wksp.readX(0)[len(x)-1], x[len(x)-1])
AnalysisDataService.remove("wksp")
def test_create_with_2D_numpy_array(self):
x = np.array([1.,2.,3.,4.])
y = np.array([[1.,2.,3.],[4.,5.,6.]])
e = np.sqrt(y)
wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=2,UnitX='TOF')
self.assertTrue(isinstance(wksp, MatrixWorkspace))
self.assertEquals(wksp.getNumberHistograms(), 2)
for i in [0,1]:
for j in range(len(y[0])):
self.assertEquals(wksp.readY(i)[j], y[i][j])
self.assertEquals(wksp.readE(i)[j], e[i][j])
self.assertEquals(wksp.readX(i)[j], x[j])
# Last X value
self.assertEquals(wksp.readX(i)[len(x)-1], x[len(x)-1])
AnalysisDataService.remove("wksp")
def test_with_data_from_other_workspace(self):
wsname = 'LOQ'
x1 = np.array([1.,2.,3.,4.])
y1 = np.array([[1.,2.,3.],[4.,5.,6.]])
e1 = np.sqrt(y1)
loq = CreateWorkspace(DataX=x1, DataY=y1,DataE=e1,NSpec=2,UnitX='Wavelength')
x2 = loq.extractX()
y2 = loq.extractY()
e2 = loq.extractE()
wksp = CreateWorkspace(DataX=x2, DataY=y2,DataE=e2,NSpec=2,UnitX='Wavelength')
self.assertTrue(isinstance(wksp, MatrixWorkspace))
self.assertEquals(wksp.getNumberHistograms(), 2)
for i in [0,1]:
for j in range(len(y2[0])):
self.assertEquals(wksp.readY(i)[j], loq.readY(i)[j])
self.assertEquals(wksp.readE(i)[j], loq.readE(i)[j])
self.assertEquals(wksp.readX(i)[j], loq.readX(i)[j])
# Last X value
self.assertEquals(wksp.readX(i)[len(x2)-1], loq.readX(i)[len(x2)-1])
AnalysisDataService.remove("wksp")
def test_create_with_numerical_vertical_axis_values(self):
data = [1.,2.,3.]
axis_values = [5,6,7]
alg = run_algorithm("CreateWorkspace", DataX=data, DataY=data, NSpec=3,VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=axis_values,child=True)
wksp = alg.getProperty("OutputWorkspace").value
for i in range(len(axis_values)):
self.assertEquals(wksp.getAxis(1).getValue(i), axis_values[i])
def test_create_with_numpy_vertical_axis_values(self):
data = [1.,2.,3.]
axis_values = np.array([6.,7.,8.])
alg = run_algorithm("CreateWorkspace", DataX=data, DataY=data, NSpec=3,VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=axis_values,child=True)
wksp = alg.getProperty("OutputWorkspace").value
for i in range(len(axis_values)):
self.assertEquals(wksp.getAxis(1).getValue(i), axis_values[i])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,580,050,091,464,534,000 | 39.862745 | 115 | 0.606046 | false |
openhatch/oh-greenhouse | greenhouse/migrations/0008_auto__add_field_people_control_group.py | 1 | 9876 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'People.control_group'
db.add_column(u'people', 'control_group',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'People.control_group'
db.delete_column(u'people', 'control_group')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': u"orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'uploads.people': {
'Meta': {'object_name': 'People', 'db_table': "u'people'"},
'contacted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'control_group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.TextField', [], {'unique': 'True', 'blank': 'True'}),
'first_upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['uploads.Uploads']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_upload': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['uploads.Uploads']"}),
'name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'total_uploads': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'ubuntu_dev': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'uploads.udd': {
'Meta': {'unique_together': "(('source', 'version'),)", 'object_name': 'UDD', 'db_table': "u'upload_history'", 'managed': 'False'},
'changed_by': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changed_by_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changed_by_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'primary_key': 'True'}),
'distribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fingerprint': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'key_id': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'nmu': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'signed_by': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'signed_by_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'signed_by_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.TextField', [], {})
},
u'uploads.uploads': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Uploads', 'db_table': "u'uploads'"},
'email_changer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_sponsor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_changer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name_sponsor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'package': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'release': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'uploads.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['uploads'] | agpl-3.0 | -2,483,690,194,374,190,600 | 72.708955 | 187 | 0.545666 | false |
spectrumone/django-outlook-api | python_tutorial/tutorial/outlookservice.py | 1 | 4799 | import requests
import uuid
import json
outlook_api_endpoint = 'https://outlook.office.com/api/v2.0{0}'
# Generic API Sending
def make_api_call(method, url, token, payload = None, parameters = None):
# Send these headers with all API calls
headers = { 'User-Agent' : 'django-tutorial/1.0',
'Authorization' : 'Bearer {0}'.format(token),
'Accept' : 'application/json'}
# Use these headers to instrument calls. Makes it easier
# to correlate requests and responses in case of problems
# and is a recommended best practice.
request_id = str(uuid.uuid4())
instrumentation = { 'client-request-id' : request_id,
'return-client-request-id' : 'true' }
headers.update(instrumentation)
response = None
  # Default Calendar event body, used only when the caller does not supply a
  # payload (e.g. post_my_events); an explicit payload argument is respected.
  if payload is None:
    payload = {
      "Subject": "Discuss the Calendar REST API",
      "Body": {
          "ContentType": "HTML",
          "Content": "I think it will meet our requirements!"
      },
      "Start": {
          "DateTime": "2014-04-04T18:00:00",
          "TimeZone": "Pacific Standard Time"
      },
      "End": {
          "DateTime": "2014-04-04T19:00:00",
          "TimeZone": "Pacific Standard Time"
      },
      "Attendees": [
        {
          "EmailAddress": {
            "Address": "[email protected]",
            "Name": "Janet Schorr"
          },
          "Type": "Required"
        }
      ]
    }
if (method.upper() == 'GET'):
response = requests.get(url, headers = headers, params = parameters)
elif (method.upper() == 'DELETE'):
response = requests.delete(url, headers = headers, params = parameters)
elif (method.upper() == 'PATCH'):
headers.update({ 'Content-Type' : 'application/json' })
response = requests.patch(url, headers = headers, data = json.dumps(payload), params = parameters)
elif (method.upper() == 'POST'):
headers.update({ 'Content-Type' : 'application/json' })
response = requests.post(url, headers = headers, data = json.dumps(payload), params = parameters)
return response
def get_my_messages(access_token):
get_messages_url = outlook_api_endpoint.format('/Me/Messages')
# Use OData query parameters to control the results
# - Only first 10 results returned
# - Only return the ReceivedDateTime, Subject, and From fields
# - Sort the results by the ReceivedDateTime field in descending order
query_parameters = {'$top': '10',
'$select': 'ReceivedDateTime,Subject,From',
'$orderby': 'ReceivedDateTime DESC'}
r = make_api_call('GET', get_messages_url, access_token, parameters = query_parameters)
if (r.status_code == requests.codes.ok):
return r.json()
else:
return "{0}: {1}".format(r.status_code, r.text)
def get_my_events(access_token):
get_events_url = outlook_api_endpoint.format('/Me/Events')
# Use OData query parameters to control the results
# - Only first 10 results returned
# - Only return the Subject, Start, and End fields
# - Sort the results by the Start field in ascending order
query_parameters = {'$top': '10',
'$select': 'Subject,Start,End',
'$orderby': 'Start/DateTime ASC'}
r = make_api_call('GET', get_events_url, access_token, parameters = query_parameters)
if (r.status_code == requests.codes.ok):
return r.json()
else:
return "{0}: {1}".format(r.status_code, r.text)
def post_my_events(access_token):
post_events_url = outlook_api_endpoint.format('/Me/Events')
r = make_api_call('POST', post_events_url, access_token)
if (r.status_code == requests.codes.ok):
return r.json()
else:
return "{0}: {1}".format(r.status_code, r.text)
def get_my_contacts(access_token):
get_contacts_url = outlook_api_endpoint.format('/Me/Contacts')
# Use OData query parameters to control the results
# - Only first 10 results returned
# - Only return the GivenName, Surname, and EmailAddresses fields
# - Sort the results by the GivenName field in ascending order
query_parameters = {'$top': '10',
'$select': 'GivenName,Surname,EmailAddresses',
'$orderby': 'GivenName ASC'}
r = make_api_call('GET', get_contacts_url, access_token, parameters = query_parameters)
if (r.status_code == requests.codes.ok):
return r.json()
else:
return "{0}: {1}".format(r.status_code, r.text)
| mit | 156,802,696,262,218,100 | 36.787402 | 106 | 0.574703 | false |
mwiencek/picard | picard/ui/itemviews.py | 1 | 27669 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import re
from PyQt4 import QtCore, QtGui
from picard.album import Album, NatAlbum
from picard.cluster import Cluster, ClusterList, UnmatchedFiles
from picard.file import File
from picard.track import Track, NonAlbumTrack
from picard.util import encode_filename, icontheme, partial
from picard.config import Option, TextOption
from picard.plugin import ExtensionPoint
from picard.ui.ratingwidget import RatingWidget
from picard.ui.collectionmenu import CollectionMenu
class BaseAction(QtGui.QAction):
NAME = "Unknown"
MENU = []
def __init__(self):
QtGui.QAction.__init__(self, self.NAME, None)
self.triggered.connect(self.__callback)
def __callback(self):
objs = self.tagger.window.panel.selected_objects()
self.callback(objs)
def callback(self, objs):
raise NotImplementedError
_album_actions = ExtensionPoint()
_cluster_actions = ExtensionPoint()
_clusterlist_actions = ExtensionPoint()
_track_actions = ExtensionPoint()
_file_actions = ExtensionPoint()
def register_album_action(action):
_album_actions.register(action.__module__, action)
def register_cluster_action(action):
_cluster_actions.register(action.__module__, action)
def register_clusterlist_action(action):
_clusterlist_actions.register(action.__module__, action)
def register_track_action(action):
_track_actions.register(action.__module__, action)
def register_file_action(action):
_file_actions.register(action.__module__, action)
def get_match_color(similarity, basecolor):
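    # Interpolate between a fixed reddish tone and the base colour so that
    # lower similarity scores appear progressively redder.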
c1 = (basecolor.red(), basecolor.green(), basecolor.blue())
c2 = (223, 125, 125)
return QtGui.QColor(
c2[0] + (c1[0] - c2[0]) * similarity,
c2[1] + (c1[1] - c2[1]) * similarity,
c2[2] + (c1[2] - c2[2]) * similarity)
class MainPanel(QtGui.QSplitter):
options = [
Option("persist", "splitter_state", QtCore.QByteArray(), QtCore.QVariant.toByteArray),
]
columns = [
(N_('Title'), 'title'),
(N_('Length'), '~length'),
(N_('Artist'), 'artist'),
]
def __init__(self, window, parent=None):
QtGui.QSplitter.__init__(self, parent)
self.window = window
self.create_icons()
self.views = [FileTreeView(window, self), AlbumTreeView(window, self)]
self.views[0].itemSelectionChanged.connect(self.update_selection_0)
self.views[1].itemSelectionChanged.connect(self.update_selection_1)
self._selected_view = 0
self._ignore_selection_changes = False
self._selected_objects = set()
TreeItem.window = window
TreeItem.base_color = self.palette().base().color()
TreeItem.text_color = self.palette().text().color()
TrackItem.track_colors = {
File.NORMAL: self.config.setting["color_saved"],
File.CHANGED: TreeItem.text_color,
File.PENDING: self.config.setting["color_pending"],
File.ERROR: self.config.setting["color_error"],
}
FileItem.file_colors = {
File.NORMAL: TreeItem.text_color,
File.CHANGED: self.config.setting["color_modified"],
File.PENDING: self.config.setting["color_pending"],
File.ERROR: self.config.setting["color_error"],
}
def selected_objects(self):
return list(self._selected_objects)
def save_state(self):
self.config.persist["splitter_state"] = self.saveState()
for view in self.views:
view.save_state()
def restore_state(self):
self.restoreState(self.config.persist["splitter_state"])
def create_icons(self):
if hasattr(QtGui.QStyle, 'SP_DirIcon'):
ClusterItem.icon_dir = self.style().standardIcon(QtGui.QStyle.SP_DirIcon)
else:
ClusterItem.icon_dir = icontheme.lookup('folder', icontheme.ICON_SIZE_MENU)
AlbumItem.icon_cd = icontheme.lookup('media-optical', icontheme.ICON_SIZE_MENU)
AlbumItem.icon_cd_saved = icontheme.lookup('media-optical-saved', icontheme.ICON_SIZE_MENU)
TrackItem.icon_note = QtGui.QIcon(":/images/note.png")
FileItem.icon_file = QtGui.QIcon(":/images/file.png")
FileItem.icon_file_pending = QtGui.QIcon(":/images/file-pending.png")
FileItem.icon_error = icontheme.lookup('dialog-error', icontheme.ICON_SIZE_MENU)
FileItem.icon_saved = QtGui.QIcon(":/images/track-saved.png")
FileItem.match_icons = [
QtGui.QIcon(":/images/match-50.png"),
QtGui.QIcon(":/images/match-60.png"),
QtGui.QIcon(":/images/match-70.png"),
QtGui.QIcon(":/images/match-80.png"),
QtGui.QIcon(":/images/match-90.png"),
QtGui.QIcon(":/images/match-100.png"),
]
FileItem.match_pending_icons = [
QtGui.QIcon(":/images/match-pending-50.png"),
QtGui.QIcon(":/images/match-pending-60.png"),
QtGui.QIcon(":/images/match-pending-70.png"),
QtGui.QIcon(":/images/match-pending-80.png"),
QtGui.QIcon(":/images/match-pending-90.png"),
QtGui.QIcon(":/images/match-pending-100.png"),
]
self.icon_plugins = icontheme.lookup('applications-system', icontheme.ICON_SIZE_MENU)
def update_selection(self, i, j):
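        # View `i` now owns the selection: clear view `j`, rebuild the set of
        # selected objects and tell the main window about the new selection.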
self._selected_view = i
self.views[j].clearSelection()
self._selected_objects.clear()
self._selected_objects.update(item.obj for item in self.views[i].selectedItems())
self.window.update_selection(self.selected_objects())
def update_selection_0(self):
if not self._ignore_selection_changes:
self._ignore_selection_changes = True
self.update_selection(0, 1)
self._ignore_selection_changes = False
def update_selection_1(self):
if not self._ignore_selection_changes:
self._ignore_selection_changes = True
self.update_selection(1, 0)
self._ignore_selection_changes = False
def update_current_view(self):
self.update_selection(self._selected_view, abs(self._selected_view - 1))
def remove(self, objects):
self._ignore_selection_changes = True
self.tagger.remove(objects)
self._ignore_selection_changes = False
view = self.views[self._selected_view]
index = view.currentIndex()
if index.isValid():
# select the current index
view.setCurrentIndex(index)
else:
self.update_current_view()
class BaseTreeView(QtGui.QTreeWidget):
options = [
Option("setting", "color_modified", QtGui.QColor(QtGui.QPalette.WindowText), QtGui.QColor),
Option("setting", "color_saved", QtGui.QColor(0, 128, 0), QtGui.QColor),
Option("setting", "color_error", QtGui.QColor(200, 0, 0), QtGui.QColor),
Option("setting", "color_pending", QtGui.QColor(128, 128, 128), QtGui.QColor),
]
def __init__(self, window, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.window = window
self.panel = parent
self.numHeaderSections = len(MainPanel.columns)
self.setHeaderLabels([_(h) for h, n in MainPanel.columns])
self.restore_state()
self.setAcceptDrops(True)
self.setDragEnabled(True)
self.setDropIndicatorShown(True)
self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
# enable sorting, but don't actually use it by default
# XXX it would be nice to be able to go to the 'no sort' mode, but the
# internal model that QTreeWidget uses doesn't support it
self.header().setSortIndicator(-1, QtCore.Qt.AscendingOrder)
self.setSortingEnabled(True)
self.expand_all_action = QtGui.QAction(_("&Expand all"), self)
self.expand_all_action.triggered.connect(self.expandAll)
self.collapse_all_action = QtGui.QAction(_("&Collapse all"), self)
self.collapse_all_action.triggered.connect(self.collapseAll)
self.doubleClicked.connect(self.activate_item)
def contextMenuEvent(self, event):
item = self.itemAt(event.pos())
if not item:
return
obj = item.obj
plugin_actions = None
can_view_info = self.window.view_info_action.isEnabled()
menu = QtGui.QMenu(self)
if isinstance(obj, Track):
if can_view_info:
menu.addAction(self.window.view_info_action)
plugin_actions = list(_track_actions)
if obj.num_linked_files == 1:
menu.addAction(self.window.open_file_action)
menu.addAction(self.window.open_folder_action)
plugin_actions.extend(_file_actions)
menu.addAction(self.window.browser_lookup_action)
menu.addSeparator()
if isinstance(obj, NonAlbumTrack):
menu.addAction(self.window.refresh_action)
elif isinstance(obj, Cluster):
menu.addAction(self.window.browser_lookup_action)
menu.addSeparator()
menu.addAction(self.window.autotag_action)
menu.addAction(self.window.analyze_action)
if isinstance(obj, UnmatchedFiles):
menu.addAction(self.window.cluster_action)
plugin_actions = list(_cluster_actions)
elif isinstance(obj, ClusterList):
menu.addAction(self.window.autotag_action)
menu.addAction(self.window.analyze_action)
plugin_actions = list(_clusterlist_actions)
elif isinstance(obj, File):
if can_view_info:
menu.addAction(self.window.view_info_action)
menu.addAction(self.window.open_file_action)
menu.addAction(self.window.open_folder_action)
menu.addAction(self.window.browser_lookup_action)
menu.addSeparator()
menu.addAction(self.window.autotag_action)
menu.addAction(self.window.analyze_action)
plugin_actions = list(_file_actions)
elif isinstance(obj, Album):
if can_view_info:
menu.addAction(self.window.view_info_action)
menu.addAction(self.window.browser_lookup_action)
menu.addSeparator()
menu.addAction(self.window.refresh_action)
plugin_actions = list(_album_actions)
menu.addAction(self.window.save_action)
menu.addAction(self.window.remove_action)
bottom_separator = False
if isinstance(obj, Album) and not isinstance(obj, NatAlbum) and obj.loaded:
releases_menu = QtGui.QMenu(_("&Other versions"), menu)
menu.addSeparator()
menu.addMenu(releases_menu)
loading = releases_menu.addAction(_('Loading...'))
loading.setEnabled(False)
bottom_separator = True
if len(self.selectedIndexes()) == len(MainPanel.columns):
def _add_other_versions():
releases_menu.removeAction(loading)
for version in obj.release_group.versions:
action = releases_menu.addAction(version["name"])
action.setCheckable(True)
if obj.id == version["id"]:
action.setChecked(True)
action.triggered.connect(partial(obj.switch_release_version, version["id"]))
_add_other_versions() if obj.release_group.loaded else \
obj.release_group.load_versions(_add_other_versions)
releases_menu.setEnabled(True)
else:
releases_menu.setEnabled(False)
if self.config.setting["enable_ratings"] and \
len(self.window.selected_objects) == 1 and isinstance(obj, Track):
menu.addSeparator()
action = QtGui.QWidgetAction(menu)
action.setDefaultWidget(RatingWidget(menu, obj))
menu.addAction(action)
menu.addSeparator()
selected_albums = [a for a in self.window.selected_objects if type(a) == Album]
if selected_albums:
if not bottom_separator:
menu.addSeparator()
menu.addMenu(CollectionMenu(selected_albums, _("Collections"), menu))
if plugin_actions:
plugin_menu = QtGui.QMenu(_("&Plugins"), menu)
plugin_menu.setIcon(self.panel.icon_plugins)
menu.addSeparator()
menu.addMenu(plugin_menu)
plugin_menus = {}
for action in plugin_actions:
action_menu = plugin_menu
for index in xrange(1, len(action.MENU)):
key = tuple(action.MENU[:index])
try:
action_menu = plugin_menus[key]
except KeyError:
action_menu = plugin_menus[key] = action_menu.addMenu(key[-1])
action_menu.addAction(action)
if isinstance(obj, Cluster) or isinstance(obj, ClusterList) or isinstance(obj, Album):
menu.addSeparator()
menu.addAction(self.expand_all_action)
menu.addAction(self.collapse_all_action)
menu.exec_(event.globalPos())
event.accept()
def restore_state(self):
sizes = self.config.persist[self.view_sizes.name]
header = self.header()
sizes = sizes.split(" ")
try:
for i in range(self.numHeaderSections - 1):
header.resizeSection(i, int(sizes[i]))
except IndexError:
pass
def save_state(self):
cols = range(self.numHeaderSections - 1)
sizes = " ".join(str(self.header().sectionSize(i)) for i in cols)
self.config.persist[self.view_sizes.name] = sizes
def supportedDropActions(self):
return QtCore.Qt.CopyAction | QtCore.Qt.MoveAction
def mimeTypes(self):
"""List of MIME types accepted by this view."""
return ["text/uri-list", "application/picard.album-list"]
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.acceptProposedAction()
def startDrag(self, supportedActions):
"""Start drag, *without* using pixmap."""
items = self.selectedItems()
if items:
drag = QtGui.QDrag(self)
drag.setMimeData(self.mimeData(items))
drag.start(supportedActions)
def mimeData(self, items):
"""Return MIME data for specified items."""
album_ids = []
files = []
url = QtCore.QUrl.fromLocalFile
for item in items:
obj = item.obj
if isinstance(obj, Album):
album_ids.append(str(obj.id))
elif isinstance(obj, Track):
files.extend(url(file.filename) for file in obj.linked_files)
elif isinstance(obj, File):
files.append(url(obj.filename))
elif isinstance(obj, Cluster):
files.extend(url(file.filename) for file in obj.files)
elif isinstance(obj, ClusterList):
files.extend(url(file.filename) for cluster in obj for file in cluster.files)
mimeData = QtCore.QMimeData()
mimeData.setData("application/picard.album-list", "\n".join(album_ids))
if files:
mimeData.setUrls(files)
return mimeData
@staticmethod
def drop_urls(urls, target):
files = []
new_files = []
for url in urls:
if url.scheme() == "file" or not url.scheme():
# Dropping a file from iTunes gives a filename with a NULL terminator
filename = os.path.normpath(os.path.realpath(unicode(url.toLocalFile()).rstrip("\0")))
file = BaseTreeView.tagger.files.get(filename)
if file:
files.append(file)
elif os.path.isdir(encode_filename(filename)):
BaseTreeView.tagger.add_directory(filename)
else:
new_files.append(filename)
elif url.scheme() in ("http", "https"):
path = unicode(url.path())
match = re.search(r"/(release|recording)/([0-9a-z\-]{36})", path)
if match:
entity = match.group(1)
mbid = match.group(2)
if entity == "release":
BaseTreeView.tagger.load_album(mbid)
elif entity == "recording":
BaseTreeView.tagger.load_nat(mbid)
if files:
BaseTreeView.tagger.move_files(files, target)
if new_files:
BaseTreeView.tagger.add_files(new_files, target=target)
def dropEvent(self, event):
return QtGui.QTreeView.dropEvent(self, event)
def dropMimeData(self, parent, index, data, action):
target = None
if parent:
if index == parent.childCount():
item = parent
else:
item = parent.child(index)
if item is not None:
target = item.obj
self.log.debug("Drop target = %r", target)
handled = False
# text/uri-list
urls = data.urls()
if urls:
if target is None:
target = self.tagger.unmatched_files
self.drop_urls(urls, target)
handled = True
# application/picard.album-list
albums = data.data("application/picard.album-list")
if albums:
if isinstance(self, FileTreeView) and target is None:
target = self.tagger.unmatched_files
albums = [self.tagger.load_album(id) for id in str(albums).split("\n")]
self.tagger.move_files(self.tagger.get_files_from_objects(albums), target)
handled = True
return handled
def activate_item(self, index):
obj = self.itemFromIndex(index).obj
# Double-clicking albums should expand them. The album info can be
# viewed by using the toolbar button.
if not isinstance(obj, Album) and obj.can_view_info():
self.window.view_info()
def add_cluster(self, cluster, parent_item=None):
if parent_item is None:
parent_item = self.clusters
cluster_item = ClusterItem(cluster, not cluster.special, parent_item)
if cluster.hide_if_empty and not cluster.files:
cluster_item.update()
cluster_item.setHidden(True)
else:
cluster_item.add_files(cluster.files)
def moveCursor(self, action, modifiers):
if action in (QtGui.QAbstractItemView.MoveUp, QtGui.QAbstractItemView.MoveDown):
item = self.currentItem()
if item and not item.isSelected():
self.setCurrentItem(item)
return QtGui.QTreeWidget.moveCursor(self, action, modifiers)
class FileTreeView(BaseTreeView):
view_sizes = TextOption("persist", "file_view_sizes", "250 40 100")
def __init__(self, window, parent=None):
BaseTreeView.__init__(self, window, parent)
self.unmatched_files = ClusterItem(self.tagger.unmatched_files, False, self)
self.unmatched_files.update()
self.setItemExpanded(self.unmatched_files, True)
self.clusters = ClusterItem(self.tagger.clusters, False, self)
self.clusters.setText(0, _(u"Clusters"))
self.setItemExpanded(self.clusters, True)
self.tagger.cluster_added.connect(self.add_cluster)
self.tagger.cluster_removed.connect(self.remove_cluster)
def remove_cluster(self, cluster):
cluster.item.setSelected(False)
self.clusters.removeChild(cluster.item)
class AlbumTreeView(BaseTreeView):
view_sizes = TextOption("persist", "album_view_sizes", "250 40 100")
def __init__(self, window, parent=None):
BaseTreeView.__init__(self, window, parent)
self.tagger.album_added.connect(self.add_album)
self.tagger.album_removed.connect(self.remove_album)
def add_album(self, album):
item = AlbumItem(album, True, self)
item.setIcon(0, AlbumItem.icon_cd)
for i, column in enumerate(MainPanel.columns):
font = item.font(i)
font.setBold(True)
item.setFont(i, font)
item.setText(i, album.column(column[1]))
self.add_cluster(album.unmatched_files, item)
def remove_album(self, album):
album.item.setSelected(False)
self.takeTopLevelItem(self.indexOfTopLevelItem(album.item))
class TreeItem(QtGui.QTreeWidgetItem):
__lt__ = lambda self, other: False
def __init__(self, obj, sortable, *args):
QtGui.QTreeWidgetItem.__init__(self, *args)
self.obj = obj
if obj is not None:
obj.item = self
if sortable:
self.__lt__ = self._lt
def _lt(self, other):
column = self.treeWidget().sortColumn()
if column == 1:
return (self.obj.metadata.length or 0) < (other.obj.metadata.length or 0)
return self.text(column).toLower() < other.text(column).toLower()
class ClusterItem(TreeItem):
def __init__(self, *args):
TreeItem.__init__(self, *args)
self.setIcon(0, ClusterItem.icon_dir)
def update(self):
for i, column in enumerate(MainPanel.columns):
self.setText(i, self.obj.column(column[1]))
album = self.obj.related_album
if self.obj.special and album and album.loaded:
album.item.update(update_tracks=False)
if self.isSelected():
TreeItem.window.update_selection()
def add_file(self, file):
self.add_files([file])
def add_files(self, files):
if self.obj.hide_if_empty and self.obj.files:
self.setHidden(False)
self.update()
items = []
for file in files:
item = FileItem(file, True)
item.update()
items.append(item)
self.addChildren(items)
def remove_file(self, file):
file.item.setSelected(False)
self.removeChild(file.item)
self.update()
if self.obj.hide_if_empty and not self.obj.files:
self.setHidden(True)
class AlbumItem(TreeItem):
def update(self, update_tracks=True):
album = self.obj
if update_tracks:
oldnum = self.childCount() - 1
newnum = len(album.tracks)
if oldnum > newnum: # remove old items
for i in xrange(oldnum - newnum):
self.takeChild(newnum - 1)
oldnum = newnum
# update existing items
for i in xrange(oldnum):
item = self.child(i)
track = album.tracks[i]
item.obj = track
track.item = item
item.update(update_album=False)
if newnum > oldnum: # add new items
items = []
for i in xrange(newnum - 1, oldnum - 1, -1): # insertChildren is backwards
item = TrackItem(album.tracks[i], False)
item.setHidden(False) # Workaround to make sure the parent state gets updated
items.append(item)
self.insertChildren(oldnum, items)
for item in items: # Update after insertChildren so that setExpanded works
item.update(update_album=False)
self.setIcon(0, AlbumItem.icon_cd_saved if album.is_complete() else AlbumItem.icon_cd)
for i, column in enumerate(MainPanel.columns):
self.setText(i, album.column(column[1]))
if self.isSelected():
TreeItem.window.update_selection()
class TrackItem(TreeItem):
def update(self, update_album=True):
track = self.obj
if track.num_linked_files == 1:
file = track.linked_files[0]
file.item = self
color = TrackItem.track_colors[file.state]
bgcolor = get_match_color(file.similarity, TreeItem.base_color)
icon = FileItem.decide_file_icon(file)
self.takeChildren()
else:
color = TreeItem.text_color
bgcolor = get_match_color(1, TreeItem.base_color)
icon = TrackItem.icon_note
oldnum = self.childCount()
newnum = track.num_linked_files
if oldnum > newnum: # remove old items
for i in xrange(oldnum - newnum):
self.takeChild(newnum - 1).obj.item = None
oldnum = newnum
for i in xrange(oldnum): # update existing items
item = self.child(i)
file = track.linked_files[i]
item.obj = file
file.item = item
item.update()
if newnum > oldnum: # add new items
items = []
for i in xrange(newnum - 1, oldnum - 1, -1):
item = FileItem(track.linked_files[i], False)
item.update()
items.append(item)
self.addChildren(items)
self.setExpanded(True)
self.setIcon(0, icon)
for i, column in enumerate(MainPanel.columns):
self.setText(i, track.column(column[1]))
self.setForeground(i, color)
self.setBackground(i, bgcolor)
if self.isSelected():
TreeItem.window.update_selection()
if update_album:
self.parent().update(update_tracks=False)
class FileItem(TreeItem):
def update(self):
file = self.obj
self.setIcon(0, FileItem.decide_file_icon(file))
color = FileItem.file_colors[file.state]
bgcolor = get_match_color(file.similarity, TreeItem.base_color)
for i, column in enumerate(MainPanel.columns):
self.setText(i, file.column(column[1]))
self.setForeground(i, color)
self.setBackground(i, bgcolor)
if self.isSelected():
TreeItem.window.update_selection()
@staticmethod
def decide_file_icon(file):
if file.state == File.ERROR:
return FileItem.icon_error
elif isinstance(file.parent, Track):
if file.state == File.NORMAL:
return FileItem.icon_saved
elif file.state == File.PENDING:
return FileItem.match_pending_icons[int(file.similarity * 5 + 0.5)]
else:
return FileItem.match_icons[int(file.similarity * 5 + 0.5)]
elif file.state == File.PENDING:
return FileItem.icon_file_pending
else:
return FileItem.icon_file
| gpl-2.0 | -2,572,298,913,582,180,000 | 38.298295 | 102 | 0.600195 | false |
frankban/UbuntuPaste | ubuntupaste.py | 1 | 6417 | # This software is licensed under the GNU Affero General Public License
# version 3 (see the file LICENSE).
import itertools
import os
import pwd
import threading
import urllib
import urllib2
import webbrowser
import sublime
import sublime_plugin
class UserInterface(object):
"""User interface for this plugin."""
def __init__(self, command_name, view):
self.command_name = command_name.title()
self.view = view
self.count = itertools.count()
def _get_content(self, contents):
return '{0}: {1}'.format(self.command_name, ' '.join(contents))
def message(self, *contents):
"""Display a message in the status bar."""
sublime.status_message(self._get_content(contents))
def status(self, *contents):
"""Add a status to the view, using contents as value."""
self.view.set_status(self.command_name, self._get_content(contents))
def progress(self, url):
"""Show pasting progress."""
dots = '.' * (self.count.next() % 4)
self.status('Pasting to', url, '[', dots.ljust(3), ']')
def error(self, *contents):
"""Display an error in the status bar."""
self.message('ERROR:', *contents)
def success(self, result, copy_to_clipboard, open_in_browser):
"""Paste succeded."""
contents = ['URL:', result, '|']
if copy_to_clipboard:
contents.append('Copied to your clipboard!')
if open_in_browser:
contents.append('Opened in your browser!')
self.message(*contents)
def done(self):
"""Erase the status messages."""
self.view.erase_status(self.command_name)
class Settings(object):
"""Store and validate plugin settings."""
def __init__(self, global_settings, local_settings):
self._global_settings = global_settings
self._local_settings = local_settings
self.error = None
self.options = ()
def _get_poster(self):
"""Get the current system user name."""
return os.getenv('USER', pwd.getpwuid(os.geteuid()).pw_name)
def _get_syntax(self, syntax_map, default):
"""Return the syntax to be used by the paster."""
syntax_file = self._global_settings.get('syntax')
if syntax_file is None:
return default
syntax = os.path.splitext(os.path.basename(syntax_file))[0]
return syntax_map.get(syntax.lower(), default)
def are_valid(self):
"""Validate and set up options."""
settings = self._local_settings
url = settings.get('url')
if url is None:
self.error = 'Invalid URL.'
return False
copy_to_clipboard = settings.get('copy_to_clipboard', True)
open_in_browser = settings.get('open_in_browser', False)
if not (copy_to_clipboard or open_in_browser):
self.error = 'You need to either copy or open the URL.'
return False
poster = settings.get('poster')
if not poster:
poster = self._get_poster()
sep = settings.get('sep', '\n\n # ---\n\n')
syntax_default = settings.get('syntax_default', 'text')
syntax_guess = settings.get('syntax_guess', True)
if syntax_guess:
syntax_map = settings.get('syntax_map', {})
syntax = self._get_syntax(syntax_map, syntax_default)
else:
syntax = syntax_default
self.options = (
url, copy_to_clipboard, open_in_browser, poster, sep, syntax
)
return True
class Paster(threading.Thread):
"""Paste code snippets to ubuntu pastebin."""
def __init__(self, url, **kwargs):
self.url = url
self.data = kwargs
self.error = None
self.result = None
threading.Thread.__init__(self)
def run(self):
try:
request = urllib2.Request(
self.url, urllib.urlencode(self.data),
headers={'User-Agent': 'SublimeText2'})
response = urllib2.urlopen(request, timeout=5)
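        # urllib2.HTTPError is a subclass of URLError, so it is caught first to report the HTTP status code.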
except urllib2.HTTPError as err:
self.error = 'HTTP error {0}.'.format(err.code)
except urllib2.URLError as err:
self.error = 'URL error {0}.'.format(err.reason)
else:
self.result = response.url
class UbuntupasteCommand(sublime_plugin.TextCommand):
"""Paste code snippets on http://pastebin.ubuntu.com/."""
def __init__(self, *args, **kwargs):
self.ui = None
self._is_enabled = True
super(UbuntupasteCommand, self).__init__(*args, **kwargs)
def is_enabled(self):
return self._is_enabled
def get_content(self, sep):
"""Return the contents of current selections.
If no region is selected, return all the text in the current view.
"""
view = self.view
regions = [i for i in view.sel() if not i.empty()]
if not regions:
regions = [sublime.Region(0, view.size())]
return sep.join(view.substr(region) for region in regions)
def run(self, edit):
self._is_enabled = False
self.ui = UserInterface(self.name(), self.view)
settings = Settings(
self.view.settings(),
sublime.load_settings('UbuntuPaste.sublime-settings'))
if settings.are_valid():
self.handle(*settings.options)
else:
self.ui.error(settings.error)
def handle(
self, url, copy_to_clipboard, open_in_browser, poster, sep, syntax):
paster = Paster(
url, content=self.get_content(sep), poster=poster, syntax=syntax)
self.ui.progress(url)
paster.start()
self.wait(paster, copy_to_clipboard, open_in_browser)
def wait(self, paster, *args):
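        # Poll the worker thread every 200 ms; sublime.set_timeout runs the callback on Sublime's main thread.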
if not paster.is_alive():
return self.done(paster, *args)
self.ui.progress(paster.url)
sublime.set_timeout(lambda: self.wait(paster, *args), 200)
def done(self, paster, copy_to_clipboard, open_in_browser):
result = paster.result
if result:
if copy_to_clipboard:
sublime.set_clipboard(result)
if open_in_browser:
webbrowser.open(result)
self.ui.success(result, copy_to_clipboard, open_in_browser)
else:
self.ui.error(paster.error)
self.ui.done()
self._is_enabled = True
| agpl-3.0 | 5,063,318,384,990,150,000 | 32.773684 | 77 | 0.59171 | false |
MontrealCorpusTools/polyglot-server | iscan/annotator/models.py | 1 | 4133 | from django.db import models
from polyglotdb import CorpusContext
# Create your models here.
class Annotation(models.Model):
ITEM_TYPE_CHOICES = (('U', 'Utterance'),
('W', 'Word'),
('Y', 'Syllable'),
('P', 'Phone'))
corpus = models.ForeignKey('iscan.Corpus', on_delete=models.CASCADE)
item_type = models.CharField(max_length=1, choices=ITEM_TYPE_CHOICES, default='P')
label = models.CharField(max_length=100)
save_user = models.BooleanField(default=False)
def __str__(self):
return '{}'.format(self.label)
def check_hierarchy(self):
a_type = self.get_item_type_display().lower()
with CorpusContext(self.corpus.config) as c:
if not c.hierarchy.has_subannotation_type(self.label):
properties = []
if self.save_user:
properties =[('user', str)]
for field in self.fields.all():
if field.annotation_choice == 'N':
t = float
elif field.annotation_choice == 'B':
t = bool
else:
t = str
properties.append((field.label, t))
c.hierarchy.add_subannotation_type(c, a_type, self.label, properties=properties)
def add_property(self, field):
props = []
if field.annotation_choice == 'N':
t = float
elif field.annotation_choice == 'B':
t = bool
else:
t = str
props.append((field.label, t))
with CorpusContext(self.corpus.config) as c:
c.hierarchy.add_subannotation_properties(c, self.label, props)
print(c.hierarchy.subannotations)
print(c.hierarchy.subannotation_properties)
def remove_property(self, field):
props = []
props.append(field.label)
with CorpusContext(self.corpus.config) as c:
c.hierarchy.remove_subannotation_properties(c, self.label, props)
print(c.hierarchy.subannotations)
print(c.hierarchy.subannotation_properties)
def save(self, *args, **kwargs):
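        # Ensure the corresponding subannotation type exists in the PolyglotDB hierarchy before saving the Django row.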
a_type = self.get_item_type_display().lower()
s_type = self.label
with CorpusContext(self.corpus.config) as c:
if not c.hierarchy.has_subannotation_type(s_type):
properties = []
if self.save_user:
properties =[('user', str)]
c.hierarchy.add_subannotation_type(c, a_type, s_type, properties=properties)
super(Annotation, self).save(*args, **kwargs)
print(c.hierarchy.subannotations)
print(c.hierarchy.subannotation_properties)
def delete(self, using=None, keep_parents=False):
with CorpusContext(self.corpus.config) as c:
c.hierarchy.remove_subannotation_type(c, self.label)
        super(Annotation, self).delete(using=using, keep_parents=keep_parents)
class AnnotationField(models.Model):
FIELD_CHOICES = (('C', 'Choice field'),
('S', 'String'),
('B', 'Boolean'),
('N', 'Numeric'))
annotation = models.ForeignKey(Annotation, on_delete=models.CASCADE, related_name='fields')
annotation_choice = models.CharField(max_length=1, choices=FIELD_CHOICES, default='C')
label = models.CharField(max_length=100)
def __str__(self):
return '{} {}'.format(self.annotation, self.label)
def save(self, *args, **kwargs):
super(AnnotationField, self).save(*args, **kwargs)
self.annotation.add_property(self)
def delete(self, using=None, keep_parents=False):
self.annotation.remove_property(self)
        super(AnnotationField, self).delete(using=using, keep_parents=keep_parents)
class AnnotationChoice(models.Model):
annotation = models.ForeignKey(AnnotationField, on_delete=models.CASCADE, related_name='choices')
choice = models.CharField(max_length=100)
def __str__(self):
return '{} = {}'.format(self.annotation, self.choice)
| mit | -4,415,763,708,759,667,700 | 37.990566 | 101 | 0.585289 | false |