repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
eHealthAfrica/rapidsms_textit | rapidsms_textit/tests/test_views.py | 1 | 2493 | from __future__ import print_function, unicode_literals
import json
import logging
import mock
from django.core.urlresolvers import reverse
from .utils import TextItTest
logger = logging.getLogger(__name__)
class TextItViewTest(TextItTest):
disable_phases = True
def send_to_view(self, data):
"""Send data to the textit view, return whatever the response is"""
encoded_data = {u'status': u'P', u'direction': u'I', u'phone': u'+' + data['phone'],
u'text': data['text'], u'sms': u'449177', u'relayer': u'2166',
u'time': u'2014-08-04T17:05:16.000000', u'relayer_phone': u'+23480998904',
u'event': data['event']}
url = reverse('textit-backend') + '?key=somefunnystring'
return self.client.post(url, encoded_data)
def test_cannot_get(self):
# GET is not a valid method
response = self.client.get(reverse('textit-backend'))
self.assertEqual(405, response.status_code)
def test_invalid_response(self):
"""HTTP 400 should return if data is invalid."""
data = {'event': 'illegal', 'phone': '42', 'text': 'hi there'}
conn = mock.Mock()
with mock.patch('rapidsms_textit.views.lookup_connections') as \
lookup_connections:
lookup_connections.return_value = [conn]
with mock.patch('rapidsms_textit.views.receive') as receive:
response = self.send_to_view(data)
self.assertEqual(response.status_code, 400)
receive.assert_called()
def test_incoming_message(self):
# If we call the view as if TextIt is delivering a message, the
# message is passed to RapidSMS. Any unicode is preserved.
text = u"TEXT MESSAGE \u0123\u4321"
data = {
'event': 'mo_sms',
'phone': '42',
'text': text,
}
conn = mock.Mock()
with mock.patch('rapidsms_textit.views.lookup_connections') as \
lookup_connections:
lookup_connections.return_value = [conn]
with mock.patch('rapidsms_textit.views.receive') as receive:
response = self.send_to_view(data)
self.assertEqual(200, response.status_code, response.content)
receive.assert_called()
args, kwargs = receive.call_args
received_text, connection = args
self.assertEqual(text, received_text)
self.assertEqual(conn, connection)
| bsd-3-clause | 7,159,265,041,052,526,000 | 37.353846 | 98 | 0.60369 | false |
afomm/autotest-docker | dockertest/subtestbase.py | 1 | 8725 | """
Adapt/extend autotest.client.test.test for Docker test sub-framework
Implement docker subtest base to avoid circular dependencies in dockertest core
modules.
"""
# Pylint runs from a different directory, it's fine to import this way
# pylint: disable=W0403
import logging
import traceback
from xceptions import DockerTestFail
from config import get_as_list
class SubBase(object):
"""
Methods/attributes common to Subtest & SubSubtest classes
:note: This class is indirectly referenced by the control-file
so it cannot contain anything dockertest-implementation
specific.
"""
#: Configuration section for subclass, auto-generated by ``__init__``.
config_section = None
#: Configuration dictionary (read-only for instances)
config = None
#: Unique temporary directory for this instance (automatically cleaned up)
#: **warning**: DO NOT ASSUME DIRECTORY WILL BE EMPTY!!!
tmpdir = None
#: Number of additional space/tab characters to prefix when logging
n_spaces = 16 # date/timestamp length
#: Number of additional space/tab characters to prefix when logging
n_tabs = 1 # one-level
step_log_msgs = {
"initialize": "initialize()",
"run_once": "run_once()",
"postprocess": "postprocess()",
"cleanup": "cleanup()"
}
def __init__(self, *args, **dargs):
super(SubBase, self).__init__(*args, **dargs)
self.step_log_msgs = self.step_log_msgs.copy()
def initialize(self):
"""
Called every time the test is run.
"""
self.log_step_msg('initialize')
# Issue warnings for failed to customize suggested options
not_customized = self.config.get('__example__', None)
        if not_customized is not None and not_customized != '':
self.logdebug("WARNING: Recommended options not customized:")
for nco in get_as_list(not_customized):
self.logdebug("WARNING: %s" % nco)
self.logwarning("WARNING: Test results may be externally "
"dependent! (See debug log for details)")
msg = "%s configuration:\n" % self.__class__.__name__
for key, value in self.config.items():
if key == '__example__' or key.startswith('envcheck'):
continue
msg += '\t\t%s = "%s"\n' % (key, value)
self.logdebug(msg)
def run_once(self):
"""
Called once only to exercise subject of sub-subtest
"""
self.log_step_msg('run_once')
def postprocess(self):
"""
Called to process results of subject
"""
self.log_step_msg('postprocess')
def cleanup(self):
"""
Always called, before any exceptions thrown are re-raised.
"""
self.log_step_msg('cleanup')
def log_step_msg(self, stepname):
"""
        Send the message stored in ``step_log_msgs`` under key ``stepname`` to logging
"""
msg = self.step_log_msgs.get(stepname)
if msg:
self.loginfo(msg)
@staticmethod
def failif(condition, reason=None):
"""
Convenience method for subtests to avoid importing ``TestFail``
exception
:param condition: Boolean condition, fail test if True.
:param reason: Helpful text describing why the test failed
:raise DockerTestFail: If condition evaluates ``True``
"""
if reason is None:
reason = "Failed test condition"
if bool(condition):
raise DockerTestFail(reason)
@staticmethod
def failif_ne(actual, expected, reason=None):
"""
Convenience method for subtests to compare two values and
fail if they differ. Failure message will include the expected
and actual values for ease of debugging.
:param actual: value being tested
:param expected: value to which we compare.
:param reason: Helpful text describing why the test failed
:raise DockerTestFail: If actual != expected
"""
if actual == expected:
return
if reason is None:
reason = "Failed test condition"
# By default, quote each value. This is especially helpful when
# actual or expected is the empty string or a string with spaces.
# But if both are numeric types the quotes distract, so remove them.
arg = "'{}'"
if all(isinstance(x, (int, long, float)) for x in [actual, expected]):
arg = "{}"
spec = "{}: expected " + arg + "; got " + arg
raise DockerTestFail(spec.format(reason, expected, actual))
@staticmethod
def failif_not_in(needle, haystack, description=None):
"""
Convenience method for subtests to test for an expected substring
being contained in a larger string, e.g. to look for XYZ in a
command's stdout/stderr.
:param needle: the string you're looking for
:param haystack: the actual string, e.g stdout results from a command
:param description: description of haystack, e.g. 'stdout from foo'
:raise DockerTestFail: if needle is not found in haystack
"""
if description is None:
description = 'string'
if needle in haystack:
return
raise DockerTestFail("Expected string '%s' not in %s '%s'"
% (needle, description, haystack))
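    # Illustrative usage sketch (not from the original source): a subtest's
    # postprocess() would typically call these helpers on hypothetical result
    # objects, e.g.
    #
    #     self.failif(result is None, "no result produced")
    #     self.failif_ne(result.exit_status, 0, "docker exit status")
    #     self.failif_not_in("Usage:", result.stdout, "stdout from docker help")
    #
    # Each raises DockerTestFail with a descriptive message when the check fails.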
@classmethod
def log_x(cls, lvl, msg, *args):
"""
Send ``msg`` & ``args`` through to logging module function with
name ``lvl``
"""
meth = getattr(logging, lvl)
testname = cls.__name__
return meth("%s%s: %s" % ("\t" * cls.n_tabs, testname, msg), *args)
@classmethod
def log_xn(cls, lvl, msg, *args):
"""
Multiline-split and send msg & args through to logging module
:param lvl: logging method name (``'debug'``, ``'info'``, etc.)
:param msg: Message format-string
"""
# date, loglevel, this module offset
newline = '\n' + ' ' * cls.n_spaces + '\t' * cls.n_tabs
newline += " " * (len(cls.__name__) + 2) # cls name + ': '
try:
msg = (str(msg) % args).replace('\n', newline)
except TypeError:
if args is tuple():
cls.logwarning("Following message contains format strings but "
"has no arguments:")
msg = str(msg).replace('\n', newline)
else:
raise TypeError("Not all arguments converted during formatting"
": msg='%s', args=%s" % (msg, args))
return cls.log_x(lvl, msg)
@classmethod
def logdebug(cls, message, *args):
r"""
Log a DEBUG level message to the controlling terminal **only**
:param message: Same as ``logging.debug()``
:param \*args: Same as ``logging.debug()``
"""
# Never split over multiple lines
cls.log_x('debug', message, *args)
@classmethod
def loginfo(cls, message, *args):
r"""
Log a INFO level message to the controlling terminal **only**
:param message: Same as ``logging.info()``
:param \*args: Same as ``logging.info()``
"""
cls.log_xn('info', message, *args)
@classmethod
def logwarning(cls, message, *args):
r"""
Log a WARNING level message to the controlling terminal **only**
:param message: Same as ``logging.warning()``
:param \*args: Same as ``logging.warning()``
"""
cls.log_xn('warn', message, *args)
@classmethod
def logerror(cls, message, *args):
r"""
Log a ERROR level message to the controlling terminal **only**
:param message: Same as ``logging.error()``
:param \*args: Same as ``logging.error()``
"""
cls.log_xn('error', message, *args)
@classmethod
def logtraceback(cls, name, exc_info, error_source, detail):
r"""
Log error to error, traceback to debug, of controlling terminal
**only**
"""
error_head = ("%s failed to %s\n%s\n%s" % (name, error_source,
detail.__class__.__name__,
detail))
error_tb = traceback.format_exception(exc_info[0],
exc_info[1],
exc_info[2])
error_tb = "".join(error_tb).strip() + "\n"
cls.logerror(error_head)
cls.logdebug(error_tb)
| gpl-2.0 | -758,189,259,401,791,900 | 33.760956 | 79 | 0.56894 | false |
mwaskom/lyman | lyman/frontend.py | 1 | 14777 | """Forward facing lyman tools with information about ecosystem."""
import os
import os.path as op
import tempfile
import re
import sys
import imp
import shutil
from textwrap import dedent
import yaml
import numpy as np
import nipype
from traits.api import (HasTraits, Str, Bool, Float, Int,
Tuple, List, Dict, Enum, Either)
__all__ = ["info", "subjects", "execute"]
class ProjectInfo(HasTraits):
"""General information common to a project."""
data_dir = Str(
"../data",
desc=dedent("""
The location where raw data is stored. Should be defined relative
to the ``lyman_dir``.
"""),
)
proc_dir = Str(
"../proc",
desc=dedent("""
The location where lyman workflows will output persistent data. Should
be defined relative to the ``lyman_dir``.
"""),
)
cache_dir = Str(
"../cache",
desc=dedent("""
The location where lyman workflows will write intermediate files during
execution. Should be defined relative to the ``lyman_dir``.
"""),
)
remove_cache = Bool(
True,
desc=dedent("""
If True, delete the cache directory containing intermediate files after
successful execution of the workflow. This behavior can be overridden
at runtime by command-line arguments.
"""),
)
fm_template = Str(
"{session}_fieldmap_{encoding}.nii.gz",
desc=dedent("""
A template string to identify session-specific fieldmap files.
"""),
)
ts_template = Str(
"{session}_{experiment}_{run}.nii.gz",
desc=dedent("""
A template string to identify time series data files.
"""),
)
sb_template = Str(
"{session}_{experiment}_{run}_ref.nii.gz",
desc=dedent("""
A template string to identify reference volumes corresponding to each
run of time series data.
"""),
)
voxel_size = Tuple(
Float(2), Float(2), Float(2),
desc=dedent("""
The voxel size to use for the functional template.
"""),
)
phase_encoding = Enum(
"pa", "ap",
desc=dedent("""
The phase encoding direction used in the functional acquisition.
"""),
)
scan_info = Dict(
Str, Dict(Str, Dict(Str, List(Str))),
desc=dedent("""
Information about scanning sessions. (Automatically populated by
reading the ``scans.yaml`` file).
"""),
)
class ModelInfo(HasTraits):
"""Model-specific level of information about a specific model."""
model_name = Str(
desc=dedent("""
The name of the model. (Automatically populated from module name).
""")
)
task_model = Bool(
True,
desc=dedent("""
If True, model the task using a design file matching the model name.
""")
)
smooth_fwhm = Either(
Float(2), None,
desc=dedent("""
The size of the Gaussian smoothing kernel for spatial filtering.
"""),
)
surface_smoothing = Bool(
True,
desc=dedent("""
If True, filter cortical voxels using Gaussian weights computed along
the surface mesh.
"""),
)
interpolate_noise = Bool(
False,
desc=dedent("""
        If True, identify locally noisy voxels and replace their values
using interpolation during spatial filtering. Warning: this option is
still being refined.
"""),
)
hpf_cutoff = Either(
Float(128), None,
usedefault=True,
desc=dedent("""
The cutoff value (in seconds) for the temporal high-pass filter.
"""),
)
percent_change = Bool(
False,
desc=dedent("""
If True, convert data to percent signal change units before model fit.
"""),
)
nuisance_components = Dict(
Enum("wm", "csf", "edge", "noise"), Int,
usedefault=True,
desc=dedent("""
Anatomical sources and number of components per source to include.
""")
)
save_residuals = Bool(
False,
desc=dedent("""
If True, write out an image with the residual time series in each voxel
after model fitting.
"""),
)
hrf_derivative = Bool(
True,
desc=dedent("""
If True, include the temporal derivative of the HRF model.
"""),
)
# TODO parameter names to filter the design and generate default contrasts?
contrasts = List(
Tuple(Str, List(Str), List(Float)),
desc=dedent("""
Definitions for model parameter contrasts. Each item in the list should
be a tuple with the fields: (1) the name of the contrast, (2) the names
of the parameters included in the contrast, and (3) the weights to
apply to the parameters.
"""),
)
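    # For example (hypothetical parameter names), a contrast entry could look
    # like ("cue-vs-baseline", ["cue"], [1.0]) or
    # ("left-vs-right", ["left", "right"], [1.0, -1.0]).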
class ExperimentInfo(ModelInfo):
"""More specific experiment-level information."""
experiment_name = Str(
desc=dedent("""
The name of the experiment. (Automatically populated from module name).
"""),
)
tr = Float(
desc=dedent("""
The temporal resolution of the functional acquisition in seconds.
"""),
)
crop_frames = Int(
0,
desc=dedent("""
The number of frames to remove from the beginning of each time series
during preprocessing.
"""),
)
class LymanInfo(ProjectInfo, ExperimentInfo):
"""Combination of all information classes."""
pass
def load_info_from_module(module_name, lyman_dir):
"""Load lyman information from a Python module as specified by name."""
module_file_name = op.join(lyman_dir, module_name + ".py")
module_sys_name = "lyman_" + module_name
# Load the module from memory or disk
try:
module = sys.modules[module_sys_name]
except KeyError:
module = imp.load_source(module_sys_name, module_file_name)
# Parse the "normal" variables from the info module
module_vars = {k: v
for k, v in vars(module).items()
if not re.match("__.+__", k)}
return module_vars
def load_scan_info(lyman_dir=None):
"""Load information about subjects, sessions, and runs from scans.yaml."""
if lyman_dir is None:
lyman_dir = os.environ.get("LYMAN_DIR", None)
if lyman_dir is None:
return {}
scan_fname = op.join(lyman_dir, "scans.yaml")
with open(scan_fname) as fid:
info = yaml.load(fid, Loader=yaml.BaseLoader)
return info
def check_extra_vars(module_vars, spec):
"""Raise when unexpected information is defined to avoid errors."""
kind = spec.__name__.lower().strip("info")
extra_vars = set(module_vars) - set(spec().trait_names())
if extra_vars:
msg = ("The following variables were unexpectedly present in the "
"{} information module: {}".format(kind, ", ".join(extra_vars)))
raise RuntimeError(msg)
def info(experiment=None, model=None, lyman_dir=None):
"""Load information from various files to control analyses.
The default behavior (when called with no arguments) is to load project
level information. Additional information can be loaded by specifying an
experiment or an experiment and a model.
Parameters
----------
experiment : string
Name of the experiment to load information for. Will include
information from the file <lyman_dir>/<experiment>.py.
model : string
Name of the model to load information for. Requires having also
specified an experiment. Will include information from the file
<lyman_dir>/<experiment>-<model>.py.
lyman_dir : string
Path to the directory where files with information are stored;
otherwise read from the $LYMAN_DIR environment variable.
Returns
-------
info : LymanInfo
This object has traits with various analysis-related parameters.
"""
if lyman_dir is None:
lyman_dir = os.environ.get("LYMAN_DIR", None)
# --- Load project-level information
if lyman_dir is None:
project_info = {}
else:
project_info = load_info_from_module("project", lyman_dir)
check_extra_vars(project_info, ProjectInfo)
project_info["scan_info"] = load_scan_info(lyman_dir)
# --- Load the experiment-level information
if experiment is None:
experiment_info = {}
else:
experiment_info = load_info_from_module(experiment, lyman_dir)
experiment_info["experiment_name"] = experiment
check_extra_vars(experiment_info, ExperimentInfo)
# --- Load the model-level information
if model is None:
model_info = {}
else:
if experiment is None:
raise RuntimeError("Loading a model requires an experiment")
model_info = load_info_from_module(experiment + "-" + model, lyman_dir)
model_info["model_name"] = model
check_extra_vars(model_info, ModelInfo)
# TODO set default single parameter contrasts?
# --- Set the output traits, respecting inheritance
info = (LymanInfo()
.trait_set(**project_info)
.trait_set(**experiment_info)
.trait_set(**model_info))
# Ensure that directories are specified as real absolute paths
if lyman_dir is None:
base = op.join(tempfile.mkdtemp(), "lyman")
else:
base = lyman_dir
directories = ["data_dir", "proc_dir", "cache_dir"]
orig = info.trait_get(directories)
full = {k: op.abspath(op.join(base, v)) for k, v in orig.items()}
for d in full.values():
if not op.exists(d):
os.mkdir(d)
info.trait_set(**full)
return info
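# Usage sketch (hypothetical experiment/model names and path):
#
#     project_info = info()
#     exp_info = info(experiment="dots")
#     model_info = info(experiment="dots", model="glm1", lyman_dir="/path/to/lyman")
#     model_info.smooth_fwhm, model_info.contrasts
#
# Each call returns a LymanInfo object whose traits merge project-, experiment-,
# and model-level settings, respecting the inheritance described above.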
def subjects(subject_arg=None, sessions=None, lyman_dir=None):
"""Find a list of subjects in a variety of ways.
Parameters
----------
subject_arg : list or string
This argument can take several forms:
- None, in which case all subject ids in scans.yaml are used.
- A list of subject ids or single subject id which will be used.
- The name (without extension) of a text file in the <lyman_dir>
containing list of subject ids, or a list with a single entry
corresponding to the name of a file.
- A single subject id, which will be used.
sessions : list
A list of session ids. Only valid when there is a single subject
in the returned list. This parameter can be used to validate that
the requested sessions exist for the requested subject.
lyman_dir : string
Path to the directory where files with information are stored;
otherwise read from the $LYMAN_DIR environment variable.
Returns
-------
subjects : list of strings
A list of subject ids.
"""
scan_info = load_scan_info(lyman_dir)
if lyman_dir is None:
lyman_dir = os.environ.get("LYMAN_DIR", None)
if lyman_dir is None:
return []
# -- Parse the input
if isinstance(subject_arg, list) and len(subject_arg) == 1:
subject_arg = subject_arg[0]
string_arg = isinstance(subject_arg, str)
if subject_arg is None:
subjects = list(scan_info)
elif string_arg:
subject_path = op.join(lyman_dir, subject_arg + ".txt")
if op.isfile(subject_path):
subjects = np.loadtxt(subject_path, str, ndmin=1).tolist()
else:
subjects = [subject_arg]
else:
subjects = subject_arg
# -- Check the input
    unexpected_subjects = set(subjects) - set(scan_info)
    if unexpected_subjects:
        msg = "Specified subjects were not in scans.yaml: {}"
        raise RuntimeError(msg.format(unexpected_subjects))
if sessions is not None:
try:
subject, = subjects
except ValueError:
raise RuntimeError("Can only specify session for single subject")
unexpected_sessions = set(sessions) - set(scan_info[subject])
if unexpected_sessions:
msg = "Specified sessions were not in scans.yaml for {}: {}"
raise RuntimeError(msg.format(subject, unexpected_sessions))
return subjects
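# For instance (hypothetical ids): subjects(["subj01", "subj02"]) returns the
# list unchanged provided both ids appear in scans.yaml, subjects("group_a")
# reads <lyman_dir>/group_a.txt if that file exists, and subjects() with no
# arguments returns every subject listed in scans.yaml.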
def execute(wf, args, info):
"""Main interface for (probably) executing a nipype workflow.
Depending on the command-line and module-based parameters, different things
might happen with the workflow object. See the code for details.
Parameters
----------
wf : Workflow
Nipype workflow graph with processing steps.
args : argparse Namespace
Parsed arguments from lyman command-line interface.
info : LymanInfo
Analysis execution parameters from lyman info files.
Returns
-------
res : variable
The result of the execution, which can take several forms. See the
code to understand the relationship between input parameters and
output type.
"""
# Set a location for the workflow to save debugging files on a crash
crash_dir = op.join(info.cache_dir, "crashdumps")
wf.config["execution"]["crashdump_dir"] = crash_dir
# Set various nipype config options if debugging
if args.debug:
nipype.config.enable_debug_mode()
# Locate the directory where intermediate processing outputs will be stored
# and optionally remove it to force a full clean re-run of the workflow.
cache_dir = op.join(wf.base_dir, wf.name)
if args.clear_cache:
if op.exists(cache_dir):
shutil.rmtree(cache_dir)
# One option is to do nothing (allowing a check from the command-line that
# everything is specified properly),
if not args.execute:
res = None
# Another option is to generate an svg image of the workflow graph
elif args.graph:
if args.graph is True:
fname = args.stage
else:
fname = args.graph
res = wf.write_graph(fname, "orig", "svg")
# Alternatively, submit the workflow to the nipype execution engine
else:
# TODO expose other nipype plugins as a command-line parameter
if args.n_procs == 1:
plugin, plugin_args = "Linear", {}
else:
plugin, plugin_args = "MultiProc", {"n_procs": args.n_procs}
res = wf.run(plugin, plugin_args)
# After successful completion of the workflow, optionally delete the
# intermediate files, which are not usually needed aside from debugging
# (persistent outputs go into the `info.proc_dir`).
if info.remove_cache and not args.debug and op.exists(cache_dir):
shutil.rmtree(cache_dir)
return res
| bsd-3-clause | 1,358,385,128,766,538,500 | 30.984848 | 79 | 0.61589 | false |
anneline/Bika-LIMS | bika/lims/utils/analysisrequest.py | 1 | 4343 | from Products.CMFCore.utils import getToolByName
from bika.lims.interfaces import ISample
from bika.lims.utils import tmpID
from bika.lims.utils.sample import create_sample
from bika.lims.utils.samplepartition import create_samplepartition
from bika.lims.workflow import doActionFor
from Products.CMFPlone.utils import _createObjectByType
def create_analysisrequest(
context,
request,
values,
analyses=[],
partitions=None,
specifications=None,
prices=None
):
    # Gather necessary tools
workflow = getToolByName(context, 'portal_workflow')
bc = getToolByName(context, 'bika_catalog')
# Create new sample or locate the existing for secondary AR
if values['Sample']:
secondary = True
if ISample.providedBy(values['Sample']):
sample = values['Sample']
else:
sample = bc(UID=values['Sample'])[0].getObject()
workflow_enabled = sample.getSamplingWorkflowEnabled()
else:
secondary = False
workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
sample = create_sample(context, request, values)
# Create the Analysis Request
ar = _createObjectByType('AnalysisRequest', context, tmpID())
ar.setSample(sample)
# processform renames the sample, this requires values to contain the Sample.
values['Sample'] = sample
ar.processForm(REQUEST=request, values=values)
# Object has been renamed
ar.edit(RequestID=ar.getId())
# Set initial AR state
workflow_action = 'sampling_workflow' if workflow_enabled \
else 'no_sampling_workflow'
workflow.doActionFor(ar, workflow_action)
# Set analysis request analyses
analyses = ar.setAnalyses(analyses, prices=prices, specs=specifications)
skip_receive = ['to_be_sampled', 'sample_due', 'sampled', 'to_be_preserved']
if secondary:
        # Only 'sample_due' and 'sample_received' samples can be selected
# for secondary analyses
doActionFor(ar, 'sampled')
doActionFor(ar, 'sample_due')
sample_state = workflow.getInfoFor(sample, 'review_state')
if sample_state not in skip_receive:
doActionFor(ar, 'receive')
for analysis in ar.getAnalyses(full_objects=1):
doActionFor(analysis, 'sample_due')
analysis_state = workflow.getInfoFor(analysis, 'review_state')
if analysis_state not in skip_receive:
doActionFor(analysis, 'receive')
if not secondary:
# Create sample partitions
for n, partition in enumerate(partitions):
# Calculate partition id
partition_prefix = sample.getId() + "-P"
partition_id = '%s%s' % (partition_prefix, n + 1)
partition['part_id'] = partition_id
# Point to or create sample partition
if partition_id in sample.objectIds():
partition['object'] = sample[partition_id]
else:
partition['object'] = create_samplepartition(
sample,
partition,
analyses
)
# If Preservation is required for some partitions,
# and the SamplingWorkflow is disabled, we need
# to transition to to_be_preserved manually.
if not workflow_enabled:
to_be_preserved = []
sample_due = []
lowest_state = 'sample_due'
for p in sample.objectValues('SamplePartition'):
if p.getPreservation():
lowest_state = 'to_be_preserved'
to_be_preserved.append(p)
else:
sample_due.append(p)
for p in to_be_preserved:
doActionFor(p, 'to_be_preserved')
for p in sample_due:
doActionFor(p, 'sample_due')
doActionFor(sample, lowest_state)
doActionFor(ar, lowest_state)
# Transition pre-preserved partitions
for p in partitions:
if 'prepreserved' in p and p['prepreserved']:
part = p['object']
state = workflow.getInfoFor(part, 'review_state')
if state == 'to_be_preserved':
workflow.doActionFor(part, 'preserve')
# Return the newly created Analysis Request
return ar
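# Usage sketch (hypothetical field names and values): from a client context and
# an HTTP request, a call could look roughly like this:
#
#     values = {'Sample': None, 'Client': client.UID(), 'Contact': contact.UID(),
#               'SamplingDate': '2014-01-01'}
#     ar = create_analysisrequest(client, request, values,
#                                 analyses=service_uids,
#                                 partitions=[{'services': service_uids}],
#                                 prices=prices)
#
# When values['Sample'] is falsy a new sample and its partitions are created;
# otherwise the AR is treated as a secondary request on the existing sample.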
| agpl-3.0 | -847,201,544,666,750,600 | 36.439655 | 81 | 0.617776 | false |
benschneider/Generic-Sweepscript | GsweepI1I1.py | 1 | 6469 | '''
Generic Sweep script
(currently setup for no more than 3 dims)
20/10/2015
- B
'''
#import numpy as np
from time import time, sleep
from parsers import copy_file
from ramp_mod import ramp
from DataStorer import DataStoreSP # , DataStore2Vec, DataStore11Vec
# Drivers
from dummydriver import instrument as dummy
from keithley2000 import instrument as key2000
from AnritzuSig import instrument as AnSigGen
from SRsim import instrument as sim900c
from Sim928 import instrument as sim928c
# from Yoko import instrument as yoko
from AfDigi import instrument as AfDig # Digitizer driver
import gc # Garbage memory collection
from IQcorr import Process as CorrProc # Handle Correlation measurements
# import sys
# from RSZNB20 import instrument as ZNB20
import os
''' Photon Correlation of the upper left quadrature '''
thisfile = __file__
filen_0 = '1205_I1I1'
folder = 'data_Jul12\\'
folder = folder + filen_0 + '\\'  # in one new folder
if not os.path.exists(folder):
os.makedirs(folder)
sim900 = sim900c('GPIB0::12::INSTR')
vm = key2000('GPIB0::29::INSTR')
# Digitizer setup
lags = 30
BW = 1e4
lsamples = 1e4
corrAvg = 1
f1 = 4.1e9 # 4.799999e9
f2 = 4.1e9
# Start with both having the same frequency
D1 = AfDig(adressDigi='3036D1', adressLo='3011D1', LoPosAB=0, LoRef=0,
name='D1', cfreq=f1, inputlvl=-15,
start=(-lags / BW), stop=(lags / BW), pt=(lags * 2 - 1),
nSample=lsamples, sampFreq=BW)
D2 = AfDig(adressDigi='3036D2', adressLo='3010D2', LoPosAB=1, LoRef=3,
name='D2', cfreq=f2, inputlvl=-15,
start=(-lags / BW), stop=(lags / BW), pt=(lags * 2 - 1),
nSample=lsamples, sampFreq=BW)
# Sweep equipment setup
pFlux = AnSigGen('GPIB0::8::INSTR', name='FluxPump',
start=0.02, stop=0.001, pt=41,
sstep=10, stime=0)
#D12spacing = dummy(name='D1-f',
# start=5.4e9, stop=3.5e9, pt=1,
# sstep=4e9, stime=0.0)
vBias = sim928c(sim900, name='V 1Mohm', sloti=4,
start=0.0, stop=0.0, pt=1,
sstep=0.060, stime=0.020)
vMag = sim928c(sim900, name='Magnet V R=22.19KOhm', sloti=3,
start=-1.09, stop=-1.09, pt=1,
sstep=0.03, stime=0.020)
nothing = dummy(name='nothing',
start=0, stop=1, pt=1,
sstep=1.0, stime=0.0)
pFlux.set_power_mode(1) # Linear mode in mV
pFlux.set_freq(8.9e9) # f1+f2)
pFlux.sweep_par='power' # Power sweep
# D12spacing.D1 = D1 # assign objects (in reverse D1 f > D2 f)
# D12spacing.D2 = D2
# D12spacing.sweep_par = 'f12'
# D12spacing.cfreq = f1+f2
# sweep_dim_1(vBias, 0.002)
dim_1 = pFlux
dim_1.UD = False
dim_1.defval = 0.001
dim_2 = vMag
dim_2.defval = -1.1
dim_3 = nothing
dim_3.defval = 0.0
sgen = None
recordD12 = True # all D1 D2 data storage
D12 = CorrProc(D1, D2, pFlux, sgen, lags, BW, lsamples, corrAvg)
D12.doHist2d = False # Record Histograms (Larger -> Slower)
D12.doCorrel = True
D12.doRaw = True
D12.doBG = True
# This describes how data is saved
DS = DataStoreSP(folder, filen_0, dim_1, dim_2, dim_3, 'Vx1k')
DS.ask_overwrite()
copy_file(thisfile, filen_0, folder)
# CorrProc controls, coordinates D1 and D2 together (also does thes calcs.)
if recordD12:
D12.create_datastore_objs(folder, filen_0, dim_1, dim_2, dim_3)
def sweep_dim_1(obj, value):
ramp(obj, obj.sweep_par, value, obj.sstep, obj.stime)
def sweep_dim_2(obj, value):
ramp(obj, obj.sweep_par, value, obj.sstep, obj.stime)
def sweep_dim_3(obj, value):
ramp(obj, obj.sweep_par, value, obj.sstep, obj.stime)
# describe how data is to be stored
def record_data(kk, jj, ii, back):
'''This function is called with each change in ii,jj,kk
content: what to measure each time
'''
if recordD12:
D12.init_trigger() # Trigger and check D1 & D2
#print 'send trigger from loop'
vdata = vm.get_val() # aquire voltage data point
if back is True:
return DS.record_data2(vdata, kk, jj, ii)
# didnt implement backsweep with Digitizers yet
DS.record_data(vdata, kk, jj, ii)
if recordD12:
D12.full_aqc(kk, jj, ii) # Records and calc D1 & D2
#if (lsamples/BW > 30):
# save_recorded()
def save_recorded():
'''
Which functions to call to save the recored data
'''
DS.save_data() # save Volt data
if recordD12:
D12.data_save() # save Digitizer data
# go to default value and activate output
sweep_dim_1(dim_1, dim_1.defval)
sweep_dim_2(dim_2, dim_2.defval)
sweep_dim_3(dim_3, dim_3.defval)
dim_1.output(1)
dim_2.output(1)
dim_3.output(0)
print 'Executing sweep'
texp = (2.0*dim_3.pt*dim_2.pt*dim_1.pt*(0.032+corrAvg*lsamples/BW)/60.0)
# print 'req time (min):'+str(2.0*dim_3.pt*dim_2.pt*dim_1.pt*0.032/60)
print 'req time (min):' + str(texp)
t0 = time()
try:
for kk in range(dim_3.pt):
sweep_dim_3(dim_3, dim_3.lin[kk])
sweep_dim_2(dim_2, dim_2.start)
for jj in range(dim_2.pt):
sweep_dim_2(dim_2, dim_2.lin[jj])
sweep_dim_1(dim_1, dim_1.start)
sleep(0.2)
print 'Up Trace'
for ii in range(dim_1.pt):
#txx = time()
sweep_dim_1(dim_1, dim_1.lin[ii])
record_data(kk, jj, ii, False)
#print 'sweep+record ', time()-txx
if dim_1.UD is True:
sweep_dim_1(dim_1, dim_1.stop)
sleep(0.1)
print 'Down Trace'
for ii2 in range((dim_1.pt - 1), -1, -1):
sweep_dim_1(dim_1, dim_1.lin[ii2])
record_data(kk, jj, ii2, True)
save_recorded()
runt = time()-t0 # time run so far
avgtime = runt / ((kk+1)*(jj+1)*(ii+1)) # per point
t_rem = avgtime*dim_3.pt*dim_2.pt*dim_1.pt - runt # time left
print 'req time (h):' + str(t_rem / 3600) + ' pt: ' + str(avgtime)
print 'Measurement Finished'
finally:
print 'Time used min:' + str((time() - t0) / 60)
print 'Sweep back to default'
sweep_dim_1(dim_1, dim_1.defval)
sleep(1)
sweep_dim_2(dim_2, dim_2.defval)
sleep(1)
sweep_dim_3(dim_3, dim_3.defval)
sleep(1)
dim_1.output(0)
sleep(1)
dim_2.output(0)
sleep(1)
dim_3.output(0)
sim900._dconn()
gc.collect()
D1.performClose()
D2.performClose()
# sweep_dim_1(vBias, 0.0)
pFlux.output(0)
print 'done' | gpl-2.0 | -669,917,950,247,425,500 | 28.013453 | 78 | 0.609059 | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/legacy/models/test_revision.py | 2 | 1565 | # encoding: utf-8
import datetime
from nose.tools import assert_equal
from ckan.tests.legacy import *
import ckan.model as model
# NB Lots of revision tests are part of vdm. No need to repeat those here.
class TestRevision:
@classmethod
def setup_class(cls):
# Create a test package
rev = model.repo.new_revision()
rev.author = 'Tester'
rev.timestamp = datetime.datetime(2020, 1, 1)
rev.approved_timestamp = datetime.datetime(2020, 1, 2)
rev.message = 'Test message'
pkg = model.Package(name='testpkg')
model.Session.add(pkg)
model.Session.commit()
model.Session.remove()
revs = model.Session.query(model.Revision).\
order_by(model.Revision.timestamp.desc()).all()
cls.rev = revs[0] # newest
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_revision_as_dict(self):
rev_dict = model.revision_as_dict(self.rev,
include_packages=True,
include_groups=True,
ref_package_by='name')
assert_equal(rev_dict['id'], self.rev.id)
assert_equal(rev_dict['author'], self.rev.author)
assert_equal(rev_dict['timestamp'], '2020-01-01T00:00:00')
assert_equal(rev_dict['approved_timestamp'], '2020-01-02T00:00:00')
assert_equal(rev_dict['message'], self.rev.message)
assert_equal(rev_dict['packages'], [u'testpkg'])
| gpl-3.0 | 6,738,866,383,545,032,000 | 33.021739 | 75 | 0.582748 | false |
feedhq/feedhq | feedhq/feeds/management/commands/add_missing.py | 1 | 1240 | from django.conf import settings
from . import SentryCommand
from ...models import enqueue_favicon, Feed, UniqueFeed
class Command(SentryCommand):
"""Updates the users' feeds"""
def handle_sentry(self, *args, **kwargs):
missing = Feed.objects.raw(
"""
select f.id, f.url
from
feeds_feed f
left join auth_user u on f.user_id = u.id
where
not exists (
select 1 from feeds_uniquefeed u where f.url = u.url
) and
u.is_suspended = false
""")
urls = set([f.url for f in missing])
UniqueFeed.objects.bulk_create([
UniqueFeed(url=url) for url in urls
])
if not settings.TESTS:
missing_favicons = UniqueFeed.objects.raw(
"""
select id, url from feeds_uniquefeed u
where
u.url != '' and
not exists (
select 1 from feeds_favicon f
where f.url = u.url
)
""")
for feed in missing_favicons:
enqueue_favicon(feed.url)
| bsd-3-clause | -4,859,175,894,789,571,000 | 30 | 72 | 0.472581 | false |
N07070/TelegramDTCBot | main.py | 1 | 1648 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import telebot # https://github.com/eternnoir/pyTelegramBotAPI
import random
from token_file import token_var
from DTCScrapper import DTCScrapper
TOKEN = token_var
bot = telebot.TeleBot(TOKEN)
about_text_bot = "Hey !\nI am a telegram bot built by @n07070. I'm open source on Github : https://github.com/N07070/TelegramDTCBot \nPlease contribute ! :)"
help_text_bot = "You can use theses commands :\n /about - Gets you information about me.\n /help - Gets you this help message.\n /quote - Gets you a random quote from danstonchat.com"
def quote():
"""
Gets a new quote from the DTC website
"""
e = DTCScrapper()
url_of_the_quote = "http://danstonchat.com/"+str(random.randint(1,16000))+".html"
final_quote = ""
iter = 0
for a in e.main(url_of_the_quote):
if iter % 2 == 0 :
final_quote += a
else:
final_quote += a + "\n"
iter += 1
print final_quote
return final_quote
del final_quote, a, e
def send_message(messages):
"""
Send the messages to the chat
"""
for m in messages:
chatid = m.chat.id
if m.content_type == 'text':
if m.text == "/quote":
text = ""
text = quote()
bot.send_message(chatid,text)
del text
if m.text == '/about':
bot.send_message(chatid,about_text_bot)
if m.text == '/help':
bot.send_message(chatid,help_text_bot)
bot.set_update_listener(send_message)
bot.polling()
while True: # Don't let the main Thread end.
pass
| gpl-3.0 | -7,595,758,703,366,299,000 | 28.428571 | 183 | 0.589806 | false |
stormi/tsunami | src/secondaires/rapport/commandes/rapport/editer.py | 1 | 3588 | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant le paramètre 'editer' de la commande 'rapport'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.interpreteur.editeur.presentation import Presentation
class PrmEditer(Parametre):
"""Commande 'rapport edit'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "editer", "edit")
self.schema = "<nombre>"
self.aide_courte = "ouvre l'éditeur de rapport"
self.aide_longue = \
"Cette commande ouvre l'éditeur de rapport pour vous permettre " \
"d'éditer un rapport existant."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
id = dic_masques["nombre"].nombre
try:
rapport = importeur.rapport.rapports[id]
except KeyError:
if personnage.est_immortel():
personnage << "|err|Ce rapport n'existe pas.|ff|"
else:
personnage << "|err|Vous ne pouvez éditer ce rapport.|ff|"
else:
if not personnage.est_immortel() and rapport.createur is not \
personnage:
personnage << "|err|Vous ne pouvez éditer ce rapport.|ff|"
return
elif not personnage.est_immortel():
if rapport.statut != "nouveau":
personnage << "|err|Vous ne pouvez éditer ce rapport.|ff|"
return
c_rapport = importeur.rapport.creer_rapport("titre",
personnage, ajouter=False)
c_rapport.copier(rapport)
editeur = importeur.interpreteur.construire_editeur(
"bugedit", personnage, c_rapport)
else:
editeur = importeur.interpreteur.construire_editeur(
"bugedit+", personnage, rapport)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| bsd-3-clause | -8,267,727,388,092,427,000 | 45.467532 | 79 | 0.661543 | false |
barbieauglend/Learning_python | Lvl02/WordGuess/WordGuess2.py | 1 | 1661 | # python 3.5
import getpass
from random import shuffle
def player_one():
docs = open('GPL.txt').read().split()
shuffle(docs)
wort = docs[1]
return wort
player_input = input('1 or 2 players?: ')
player = int(player_input)
word = ''
while word == '':
if player == 1:
while len(word) < 3:
word = player_one()
elif player == 2:
word = getpass.getpass('Word: ')
else:
player_input = input('1 or 2 players?: ')
player = int(player_input)
tries = 3
guessed_letters = []
playing = True
hidden_word = word
for letter in word[1:len(word) - 1]:
hidden_word = hidden_word.replace(letter, ' __ ')
print (hidden_word)
while playing:
gamer_input = input('Your guess: ')
hidden_word = word
if not gamer_input:
print ('Choose a letter to guess!')
elif len(gamer_input) == 1 and gamer_input in 'qwertzuiopasdfghjklyxcvbnm':
if gamer_input not in guessed_letters:
guessed_letters.append(gamer_input)
for letter in word[1:len(word) - 1]:
if letter not in guessed_letters:
hidden_word = hidden_word.replace(letter, ' __ ')
print (hidden_word)
if gamer_input not in word:
tries -= 1
print('Wrong! You have ', tries, 'more tries/try.')
if tries == 0:
print('Game Over!')
playing = False
else:
print('Good job!')
if word == hidden_word:
print('You won!')
break
else:
print('Choose another letter: ')
| unlicense | 817,038,441,491,924,100 | 24.553846 | 79 | 0.53221 | false |
jgrantdev/AoC2015 | day6/day6puzzle1.py | 1 | 3295 | def main():
input_file_name = "day6input.txt"
with open(input_file_name) as input_file:
lines = input_file.readlines()
grid_width = 1000
grid_height = 1000
light_grid = LightGrid(grid_width, grid_height)
for line in lines:
command, top_left, bottom_right = parse_command(line)
if command == 'toggle':
light_grid.toggle_section(top_left, bottom_right)
elif command == 'turn on':
light_grid.turn_on_section(top_left, bottom_right)
elif command == 'turn off':
light_grid.turn_off_section(top_left, bottom_right)
print('Lights off: ' + str(light_grid.num_lights_off()))
print('Lights on: ' + str(grid_width * grid_height - light_grid.num_lights_off()))
def parse_command(line):
if 'toggle' in line:
_, top_left, _, bottom_right = line.split()
top_left, bottom_right = get_coords(top_left, bottom_right)
return 'toggle', top_left, bottom_right
elif 'turn on' in line:
_, _, top_left, _, bottom_right = line.split()
top_left, bottom_right = get_coords(top_left, bottom_right)
return 'turn on', top_left, bottom_right
elif 'turn off' in line:
_, _, top_left, _, bottom_right = line.split()
top_left, bottom_right = get_coords(top_left, bottom_right)
return 'turn off', top_left, bottom_right
def get_coords(top_left, bottom_right):
top_left_x, top_left_y = top_left.split(',')
bottom_right_x, bottom_right_y = bottom_right.split(',')
return (int(top_left_x), int(top_left_y)), (int(bottom_right_x), int(bottom_right_y))
class LightGrid:
OFF = 0
ON = 1
def __init__(self, size_x, size_y):
self.light_grid = []
for i in range(size_x):
temp_inner_list = []
for j in range(size_y):
temp_inner_list.append(LightGrid.OFF)
self.light_grid.append(temp_inner_list)
def num_lights_off(self):
lights_off = 0
for row in self.light_grid:
for light in row:
if light == LightGrid.OFF: lights_off += 1
return lights_off
def is_on(self, x, y):
return self.light_grid[x][y] == LightGrid.ON
def is_off(self, x, y):
return self.light_grid[x][y] == LightGrid.OFF
def turn_on(self, x, y):
self.light_grid[x][y] = LightGrid.ON
def turn_off(self, x, y):
self.light_grid[x][y] = LightGrid.OFF
def toggle(self, x, y):
if self.is_on(x, y):
self.turn_off(x, y)
else:
self.turn_on(x, y)
def toggle_section(self, top_left, bottom_right):
for col in range(top_left[0], bottom_right[0]+1):
for light in range(top_left[1], bottom_right[1]+1):
self.toggle(col, light)
def turn_off_section(self, top_left, bottom_right):
for col in range(top_left[0], bottom_right[0]+1):
for light in range(top_left[1], bottom_right[1]+1):
self.turn_off(col, light)
def turn_on_section(self, top_left, bottom_right):
for col in range(top_left[0], bottom_right[0]+1):
for light in range(top_left[1], bottom_right[1]+1):
self.turn_on(col, light)
if __name__ == '__main__':
main()
| mit | -5,473,235,493,828,304,000 | 32.969072 | 89 | 0.575417 | false |
automl/HPOlib | HPOlib/wrapping.py | 2 | 24657 | ##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import imp
import logging
import psutil
import os
from Queue import Queue, Empty
import signal
import shlex
import shutil
import subprocess
import sys
from threading import Thread
import time
import warnings
import HPOlib
import HPOlib.check_before_start as check_before_start
import HPOlib.wrapping_util as wrapping_util
import HPOlib.dispatcher.runsolver_wrapper as runsolver_wrapper
# DEFAULT_TCP_LOGGING_PORT is referenced below when starting the logging server
from logging.handlers import DEFAULT_TCP_LOGGING_PORT
# Import experiment only after the check for numpy succeeded
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
INFODEVEL = """
##############################################################################
# You are using the DEVELOPMENT version. This means we might change things  #
# on a daily basis, commit untested code and remove or add features without #
# announcements. We do not intend to break any functionality, but cannot #
# guarantee to not do it. #
##############################################################################
"""
IS_DEVELOPMENT = True
hpolib_logger = logging.getLogger("HPOlib")
logger = logging.getLogger("HPOlib.wrapping")
def calculate_wrapping_overhead(trials):
wrapping_time = 0
for times in zip(trials.cv_starttime, trials.cv_endtime):
wrapping_time += times[1] - times[0]
# We need to import numpy again
import numpy as np
benchmark_time = 0
for t in trials.trials:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
benchmark_time += np.nansum(t['instance_durations'])
wrapping_time = wrapping_time - benchmark_time
return wrapping_time
def calculate_optimizer_time(trials):
optimizer_time = []
time_idx = 0
optimizer_time.append(trials.cv_starttime[0] - trials.starttime[time_idx])
for i in range(len(trials.cv_starttime[1:])):
if trials.cv_starttime[i + 1] > trials.endtime[time_idx]:
optimizer_time.append(trials.endtime[time_idx] -
trials.cv_endtime[i])
time_idx += 1
optimizer_time.append(trials.cv_starttime[i + 1] -
trials.starttime[time_idx])
else:
optimizer_time.append(trials.cv_starttime[i + 1] -
trials.cv_endtime[i])
optimizer_time.append(trials.endtime[time_idx] - trials.cv_endtime[-1])
trials.optimizer_time = optimizer_time
# We need to import numpy again
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nansum(optimizer_time)
def use_arg_parser():
"""Parse all options which can be handled by the wrapping script.
Unknown arguments are ignored and returned as a list. It is useful to
check this list in your program to handle typos etc.
Returns:
a tuple. The first element is an argparse.Namespace object,
the second a list with all unknown arguments.
"""
description = "Perform an experiment with HPOlib. " \
"Call this script from experiment directory (containing 'config.cfg')"
epilog = "Your are using HPOlib " + HPOlib.__version__
prog = "path/from/Experiment/to/HPOlib/wrapping.py"
parser = ArgumentParser(description=description, prog=prog, epilog=epilog)
parser.add_argument("-o", "--optimizer", action="store", type=str,
dest="optimizer",
help="Specify the optimizer name.", required=True)
parser.add_argument("-p", "--print", action="store_true",
dest="printcmd", default=False,
help="If set print the command instead of executing it")
parser.add_argument("-s", "--seed", action="store", type=int,
dest="seed", default=1,
help="Set the seed of the optimizer")
parser.add_argument("-t", "--title", action="store", type=str,
dest="title", default=None,
help="A title for the experiment")
parser.add_argument("--cwd", action="store", type=str, dest="working_dir",
default=None, help="Change the working directory to "
"<working_directory> prior to running the experiment")
parser.add_argument("-r", "--restore", action="store", type=str,
dest="restore", default=None,
help="Restore the state from a given directory")
group = parser.add_mutually_exclusive_group()
group.add_argument("-q", "--silent", action="store_true",
dest="silent", default=False,
help="Don't print anything during optimization")
group.add_argument("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Print stderr/stdout for optimizer")
args, unknown = parser.parse_known_args()
return args, unknown
def main():
"""Start an optimization of the HPOlib. For documentation see the
comments inside this function and the general HPOlib documentation."""
args, unknown_arguments = use_arg_parser()
if args.working_dir:
experiment_dir = args.working_dir
elif args.restore:
args.restore = os.path.abspath(args.restore) + "/"
experiment_dir = args.restore
else:
experiment_dir = os.getcwd()
formatter = logging.Formatter('[%(levelname)s] [%(asctime)s:%(name)s] %('
'message)s', datefmt='%H:%M:%S')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
hpolib_logger.addHandler(handler)
hpolib_logger.setLevel(1)
# First of all print the infodevel
if IS_DEVELOPMENT:
logger.critical(INFODEVEL)
args, unknown_arguments = use_arg_parser()
# Convert the path to the optimizer to be an absolute path, which is
# necessary later when we change the working directory
optimizer = args.optimizer
print("opti:", optimizer)
if not os.path.isabs(optimizer):
relative_path = optimizer
optimizer = os.path.abspath(optimizer)
logger.info("Converting relative optimizer path %s to absolute "
"optimizer path %s.", relative_path, optimizer)
os.chdir(experiment_dir)
experiment_dir = os.getcwd()
check_before_start.check_first(experiment_dir)
# Now we can safely import non standard things
import numpy as np
import HPOlib.Experiment as Experiment # Wants numpy and scipy
# Check how many optimizer versions are present and if all dependencies
# are installed also dynamically load optimizer obj
optimizer_version, opt_obj = check_before_start.check_optimizer(optimizer)
logger.warning("You called -o %s, I am using optimizer defined in "
"%sDefault.cfg", optimizer, optimizer_version)
optimizer = os.path.basename(optimizer_version)
config = wrapping_util.get_configuration(experiment_dir,
optimizer_version, unknown_arguments, opt_obj)
# DO NOT LOG UNTIL HERE UNLESS SOMETHING DRAMATIC HAS HAPPENED!!!
loglevel = config.getint("HPOLIB", "HPOlib_loglevel")
hpolib_logger.setLevel(loglevel)
if args.silent:
hpolib_logger.setLevel(60)
if args.verbose:
hpolib_logger.setLevel(10)
# Saving the config file is down further at the bottom, as soon as we get
# hold of the new optimizer directory
# wrapping_dir = os.path.dirname(os.path.realpath(__file__))
# Load optimizer
try:
optimizer_dir = os.path.dirname(os.path.realpath(optimizer_version))
optimizer_module = imp.load_source(optimizer_dir, optimizer_version + ".py")
except (ImportError, IOError):
logger.critical("Optimizer module %s not found", optimizer)
import traceback
logger.critical(traceback.format_exc())
sys.exit(1)
# So the optimizer module can acces the seed from the config and
config.set("HPOLIB", "seed", str(args.seed))
experiment_directory_prefix = config.get("HPOLIB", "experiment_directory_prefix")
optimizer_call, optimizer_dir_in_experiment = \
opt_obj.main(config=config, options=args,
experiment_dir=experiment_dir)
# experiment_directory_prefix=experiment_directory_prefix)
cmd = optimizer_call
# Start the server for logging from subprocesses here, because its port must
# be written to the config file.
logging_host = config.get("HPOLIB", "logging_host")
if logging_host:
logging_receiver_thread = None
default_logging_port = DEFAULT_TCP_LOGGING_PORT
for logging_port in range(default_logging_port, 65535):
try:
logging_receiver = logging_server.LoggingReceiver(
host=logging_host, port=logging_port,
handler=logging_server.LogRecordStreamHandler)
logging_receiver_thread = Thread(target=logging_receiver.serve_forever)
logging_receiver_thread.daemon = True
logger.info('%s started at %s' % (
logging_receiver.__class__.__name__,
logging_receiver.server_address))
logging_receiver_thread.start()
break
# TODO I did not find any useful documentation about which Exceptions
# I should catch here...
except Exception as e:
logger.debug(e)
logger.debug(e.message)
if logging_receiver_thread is None:
logger.critical("Could not create the logging server. Going to shut "
"down.")
sys.exit(1)
config.set("HPOLIB", "logging_port", str(logging_port))
with open(os.path.join(optimizer_dir_in_experiment, "config.cfg"), "w") as f:
config.set("HPOLIB", "is_not_original_config_file", "True")
wrapping_util.save_config_to_file(f, config, write_nones=True)
# initialize/reload pickle file
if args.restore:
try:
os.remove(os.path.join(optimizer_dir_in_experiment, optimizer + ".pkl.lock"))
except OSError:
pass
folds = config.getint('HPOLIB', 'number_cv_folds')
trials = Experiment.Experiment(expt_dir=optimizer_dir_in_experiment,
expt_name=experiment_directory_prefix + optimizer,
folds=folds,
max_wallclock_time=config.get('HPOLIB',
'cpu_limit'),
title=args.title)
trials.optimizer = optimizer_version
optimizer_output_file = os.path.join(optimizer_dir_in_experiment, optimizer + wrapping_util.get_time_string() +
"_" + str(args.seed) + ".out")
if args.restore:
# noinspection PyBroadException
try:
restored_runs = optimizer_module.restore(config=config,
optimizer_dir=optimizer_dir_in_experiment,
cmd=cmd)
except:
logger.critical("Could not restore runs for %s", args.restore)
import traceback
logger.critical(traceback.format_exc())
sys.exit(1)
logger.info("Restored %d runs", restored_runs)
trials.remove_all_but_first_runs(restored_runs)
fh = open(optimizer_output_file, "a")
fh.write("#" * 80 + "\n" + "Restart! Restored %d runs.\n" % restored_runs)
fh.close()
if len(trials.endtime) < len(trials.starttime):
trials.endtime.append(trials.cv_endtime[-1])
trials.starttime.append(time.time())
else:
trials.starttime.append(time.time())
# noinspection PyProtectedMember
trials._save_jobs()
del trials
sys.stdout.flush()
# Run call
if args.printcmd:
logger.info(cmd)
return 0
else:
# Create a second formatter and handler to customize the optimizer
# output
optimization_formatter = logging.Formatter(
'[%(levelname)s] [%(asctime)s:%(optimizer)s] %(message)s',
datefmt='%H:%M:%S')
optimization_handler = logging.StreamHandler(sys.stdout)
optimization_handler.setFormatter(optimization_formatter)
optimization_logger = logging.getLogger(optimizer)
optimization_logger.addHandler(optimization_handler)
optimizer_loglevel = config.getint("HPOLIB", "optimizer_loglevel")
optimization_logger.setLevel(optimizer_loglevel)
# Use a flag which is set to true as soon as all children are
# supposed to be killed
exit_ = wrapping_util.Exit()
signal.signal(signal.SIGTERM, exit_.signal_callback)
signal.signal(signal.SIGABRT, exit_.signal_callback)
signal.signal(signal.SIGINT, exit_.signal_callback)
signal.signal(signal.SIGHUP, exit_.signal_callback)
# Change into the current experiment directory
# Some optimizer might expect this
dir_before_exp = os.getcwd()
temporary_output_dir = config.get("HPOLIB", "temporary_output_directory")
if temporary_output_dir:
last_part = os.path.split(optimizer_dir_in_experiment)[1]
temporary_output_dir = os.path.join(temporary_output_dir, last_part)
# Replace any occurence of the path in the command
cmd = cmd.replace(optimizer_dir_in_experiment, temporary_output_dir)
optimizer_output_file = optimizer_output_file.replace(optimizer_dir_in_experiment, temporary_output_dir)
shutil.copytree(optimizer_dir_in_experiment, temporary_output_dir)
# shutil.rmtree does not work properly with NFS
# https://github.com/hashdist/hashdist/issues/113
# Idea from https://github.com/ahmadia/hashdist/
for rmtree_iter in range(5):
try:
shutil.rmtree(optimizer_dir_in_experiment)
break
except OSError, e:
time.sleep(rmtree_iter)
optimizer_dir_in_experiment = temporary_output_dir
# call target_function.setup()
fn_setup = config.get("HPOLIB", "function_setup")
if fn_setup:
# if temporary_output_dir:
# logger.critical("The options 'temporary_output_directory' "
# "and 'function_setup' cannot be used "
# "together.")
# sys.exit(1)
fn_setup_output = os.path.join(optimizer_dir_in_experiment,
"function_setup.out")
runsolver_cmd = runsolver_wrapper._make_runsolver_command(
config, fn_setup_output)
setup_cmd = runsolver_cmd + " " + fn_setup
# runsolver_output = subprocess.STDOUT
runsolver_output = open("/dev/null")
runsolver_wrapper._run_command_with_shell(setup_cmd,
runsolver_output)
os.chdir(optimizer_dir_in_experiment)
logger.info(cmd)
output_file = optimizer_output_file
fh = open(output_file, "a")
cmd = shlex.split(cmd)
print cmd
# See man 7 credentials for the meaning of a process group id
# This makes wrapping.py useable with SGEs default behaviour,
# where qdel sends a SIGKILL to a whole process group
# logger.info(os.getpid())
# os.setpgid(os.getpid(), os.getpid()) # same as os.setpgid(0, 0)
# TODO: figure out why shell=True was removed in commit f47ac4bb3ffe7f70b795d50c0828ca7e109d2879
# maybe it has something todo with the previous behaviour where a
# session id was set...
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
global child_process_pid
child_process_pid = proc.pid
process = psutil.Process(os.getpid())
logger.info("-----------------------RUNNING----------------------------------")
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# How often is the experiment pickle supposed to be opened?
if config.get("HPOLIB", "total_time_limit"):
optimizer_end_time = time.time() + config.getint("HPOLIB", "total_time_limit")
else:
optimizer_end_time = sys.float_info.max
sent_SIGINT = False
sent_SIGINT_time = np.inf
sent_SIGTERM = False
sent_SIGTERM_time = np.inf
sent_SIGKILL = False
sent_SIGKILL_time = np.inf
children_to_kill = list()
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
stderr_queue = Queue()
stdout_queue = Queue()
stderr_thread = Thread(target=enqueue_output, args=(proc.stderr, stderr_queue))
stdout_thread = Thread(target=enqueue_output, args=(proc.stdout, stdout_queue))
stderr_thread.daemon = True
stdout_thread.daemon = True
stderr_thread.start()
stdout_thread.start()
if not (args.verbose or args.silent):
logger.info('Optimizer runs with PID: %d', proc.pid)
logger.info('We start in directory %s', os.getcwd())
while True:
# this implements the total runtime limit
if time.time() > optimizer_end_time and not sent_SIGINT:
logger.info("Reached total_time_limit, going to shutdown.")
exit_.true()
# necessary, otherwise HPOlib-run takes 100% of one processor
time.sleep(0.25)
try:
while True:
line = stdout_queue.get_nowait()
fh.write(line)
fh.flush()
optimization_logger.info(line.replace("\n", ""),
extra={'optimizer': optimizer})
except Empty:
pass
try:
while True:
line = stderr_queue.get_nowait()
fh.write(line)
fh.flush()
optimization_logger.error(line.replace("\n", ""),
extra={'optimizer': optimizer})
except Empty:
pass
ret = proc.poll()
if ret is not None:
# This does not include wrapping.py
children = process.children()
if len(children) == 0:
break
# TODO: what happens if we have a ret but something is still
# running?
        if exit_.get_exit() and not sent_SIGINT:
logger.critical("Shutdown procedure: Sending SIGINT")
wrapping_util.kill_processes(signal.SIGINT)
sent_SIGINT_time = time.time()
sent_SIGINT = True
        if exit_.get_exit() and not sent_SIGTERM and time.time() \
                > sent_SIGINT_time + 5:
logger.critical("Shutdown procedure: Sending SIGTERM")
wrapping_util.kill_processes(signal.SIGTERM)
sent_SIGTERM_time = time.time()
sent_SIGTERM = True
        if exit_.get_exit() and not sent_SIGKILL and time.time() \
                > sent_SIGTERM_time + 5:
logger.critical("Shutdown procedure: Sending SIGKILL")
wrapping_util.kill_processes(signal.SIGKILL)
sent_SIGKILL_time = time.time()
sent_SIGKILL = True
logger.info("-----------------------END--------------------------------------")
ret = proc.returncode
logger.info("Finished with return code: %d", ret)
del proc
fh.close()
    # Change back into the directory we started from
os.chdir(dir_before_exp)
    # call target_function.teardown()
fn_teardown = config.get("HPOLIB", "function_teardown")
if fn_teardown:
# if temporary_output_dir:
# logger.critical("The options 'temporary_output_directory' "
# "and 'function_teardown' cannot be used "
# "together.")
# sys.exit(1)
fn_teardown_output = os.path.join(optimizer_dir_in_experiment,
"function_teardown.out")
runsolver_cmd = runsolver_wrapper._make_runsolver_command(
config, fn_teardown_output)
teardown_cmd = runsolver_cmd + " " + fn_teardown
# runsolver_output = subprocess.STDOUT
runsolver_output = open("/dev/null")
runsolver_wrapper._run_command_with_shell(teardown_cmd,
runsolver_output)
if temporary_output_dir:
# We cannot be sure that the directory
# optimizer_dir_in_experiment in dir_before_exp got deleted
# properly, therefore we append an underscore to the end of the
# filename
last_part = os.path.split(optimizer_dir_in_experiment)[1]
new_dir = os.path.join(dir_before_exp, last_part)
try:
shutil.copytree(optimizer_dir_in_experiment, new_dir)
except OSError as e:
new_dir += "_"
shutil.copytree(optimizer_dir_in_experiment, new_dir)
# shutil.rmtree does not work properly with NFS
# https://github.com/hashdist/hashdist/issues/113
# Idea from https://github.com/ahmadia/hashdist/
for rmtree_iter in range(5):
try:
shutil.rmtree(optimizer_dir_in_experiment)
break
except OSError, e:
time.sleep(rmtree_iter)
optimizer_dir_in_experiment = new_dir
trials = Experiment.Experiment(optimizer_dir_in_experiment,
experiment_directory_prefix + optimizer)
trials.endtime.append(time.time())
# noinspection PyProtectedMember
trials._save_jobs()
# trials.finish_experiment()
total_time = 0
logger.info("Best result %f", trials.get_best())
logger.info("Durations")
try:
for starttime, endtime in zip(trials.starttime, trials.endtime):
total_time += endtime - starttime
logger.info(" Needed a total of %f seconds", total_time)
logger.info(" The optimizer %s took %10.5f seconds",
optimizer, float(calculate_optimizer_time(trials)))
logger.info(" The overhead of HPOlib is %f seconds",
calculate_wrapping_overhead(trials))
logger.info(" The benchmark itself took %f seconds" % \
trials.total_wallclock_time)
except Exception as e:
logger.error(HPOlib.wrapping_util.format_traceback(sys.exc_info()))
logger.error("Experiment itself went fine, but calculating "
"durations of optimization failed: %s %s",
sys.exc_info()[0], e)
del trials
logger.info("Finished with return code: " + str(ret))
return ret
if __name__ == "__main__":
main()
| gpl-3.0 | 1,399,030,148,345,264,600 | 41.148718 | 116 | 0.583769 | false |
tri2sing/PyOO | patterns/command/invokers.py | 1 | 1312 | '''
Created on Nov 29, 2015
@author: Sameer Adhikari
'''
# Classes that initiate the chain that leads to the receivers doing their work.
# These classes are only aware that they have to invoke the abstract command.
# These classes have no visibility into how the command connects to receivers.
# (See the illustrative usage sketch at the end of this module.)
class ToolbarButton(object):
def __init__(self, name, icon):
self.name = name
self.icon = icon
def click(self):
# invoke the abstract command without knowing how the command will be set
print('Simulating the click of a toolbar button')
self.command.execute()
class MenuItem(object):
def __init__(self, menu_name, menu_item_name):
self.menu = menu_name
self.item = menu_item_name
def choose(self):
# invoke the abstract command without knowing how the command will be set
        print('Simulating the selection of a menu item')
self.command.execute()
class KeyboardCombination(object):
def __init__(self, key, modifier):
self.key = key
self.modifier = modifier
def keypress(self):
# invoke the abstract command without knowing how the command will be set
print('Simulating the press of a keyboard combination')
self.command.execute()
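# --- Illustrative usage (editor's addition; not part of the original module). ---
# A minimal sketch of how an invoker is wired to a concrete command: the invoker
# only ever calls command.execute() and never sees the receiver directly.
# The Document and PasteCommand names below are invented for this example.
class Document(object):
    def paste(self):
        print('Document (the receiver) handles the paste request')
class PasteCommand(object):
    def __init__(self, document):
        self.document = document  # the receiver that does the real work
    def execute(self):
        self.document.paste()
if __name__ == '__main__':
    button = ToolbarButton('paste', 'paste.png')
    button.command = PasteCommand(Document())  # attach the concrete command
    button.click()  # the invoker delegates to command.execute()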
| gpl-2.0 | 8,838,153,321,956,066,000 | 31.8 | 81 | 0.653963 | false |
benschneider/sideprojects1 | hdf5_to_mtx/load_S1_511_shot.py | 1 | 1651 | import numpy as np
from parsers import load_hdf5, dim
from parsers import savemtx, make_header
# import matplotlib.pyplot as plt
# from changeaxis import interp_y
# from scipy.constants import Boltzmann as Kb
# from scipy.constants import h , e, pi
filein = "S1_511_shot_100mV_4924_5217MHz"
folder = "hdf5s//09//Data_0915//"
d = load_hdf5(folder+filein+'.hdf5')
# # meas specific to change mag field to flux
# # simply comment this paragraph out
# xoff = 140.5e-3 # 139.3e-3
# x1flux = 479.6e-3
# d.n2.lin = (d.n2.lin-xoff)/x1flux + 0.5
# d.n2.start = d.n2.lin[0]
# d.n2.stop = d.n2.lin[-1]
# d.n2.name = 'Flux/Flux0'
d.n2 = [dim(name=d.stepInst[0],
start=sPar[3],
stop=sPar[4],
pt=sPar[8],
scale=1)
for sPar in d.stepItems[0]]
def search(chanList, searchString):
for i, k in enumerate(chanList):
if searchString in k:
return i, k
return None
def get_MP(d, chnum):
compx = 1j*d.data[:, chnum+1, :]
compx += d.data[:, chnum, :]
return np.abs(compx), np.angle(compx)
MAT1 = np.zeros([7, d.shape[0], d.shape[1]])
MAT1[0] = d.data[:, 1, :]
MAT1[1] = d.data[:, 2, :]
MAT1[2], MAT1[3] = get_MP(d, 7)
MAT1[4], MAT1[5] = get_MP(d, 9)
MAT1[-1] = d.data[:, -1, :]
M2 = np.zeros((7, d.n2[0].pt, d.n3.pt))
M3 = np.zeros((7, d.n2[1].pt, d.n3.pt))
M3 = MAT1[:, :d.n2[0].pt, :]
M2 = MAT1[:, d.n2[0].pt-1:, :]
header2 = make_header(d.n3, d.n2[1], d.n1, meas_data=('a.u.'))
header1 = make_header(d.n3, d.n2[0], d.n1, meas_data=('a.u.'))
savemtx('mtx_out//' + filein + '.mtx', M3, header=header1)
savemtx('mtx_out//' + filein + '2' + '.mtx', M2, header=header2)
| gpl-2.0 | -2,156,807,183,853,500,700 | 28.482143 | 64 | 0.591763 | false |
vied12/superdesk | server/superdesk/io/zczc.py | 1 | 3409 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license*.
from superdesk.io import Parser
from superdesk.errors import ParserError
from .iptc import subject_codes
from superdesk.utc import utcnow
import uuid
class ZCZCParser(Parser):
"""
It is expected that the stories contained in the files will be framed by the strings
ZCZC
NNNN
* the NNNN is optional
"""
START_OF_MESSAGE = 'ZCZC'
END_OF_MESSAGE = 'NNNN'
CATEGORY = '$'
KEYWORD = ':'
TAKEKEY = '='
HEADLINE = '^'
# *format "X" text "T" tabular
FORMAT = '*'
# &service level - Default A but for results should match category
SERVICELEVEL = '&'
# +IPTC Subject Reference Number as defined in the SubjectReference.ini file
IPTC = '+'
    # Possible values for format
TEXT = 'X'
TABULAR = 'T'
header_map = {KEYWORD: 'slugline', TAKEKEY: 'anpa_take_key',
HEADLINE: 'headline', SERVICELEVEL: None}
def can_parse(self, filestr):
return self.START_OF_MESSAGE in filestr
def parse_file(self, filename, provider):
try:
item = {}
self.set_item_defaults(item)
with open(filename, 'r', encoding='ascii') as f:
lines = f.readlines()
header = False
for line in lines:
if self.START_OF_MESSAGE in line and not header:
item['guid'] = filename + str(uuid.uuid4())
header = True
continue
if header:
if line[0] in self.header_map:
if self.header_map[line[0]]:
item[self.header_map[line[0]]] = line[1:-1]
continue
if line[0] == self.CATEGORY:
item['anpa-category'] = {'qcode': line[1]}
continue
if line[0] == self.FORMAT:
if line[1] == self.TEXT:
item['type'] = 'text'
continue
if line[1] == self.TABULAR:
item['type'] = 'preformatted'
continue
continue
if line[0] == self.IPTC:
iptc_code = line[1:-1]
item['subject'] = [{'qcode': iptc_code, 'name': subject_codes[iptc_code]}]
continue
header = False
item['body_html'] = line
else:
if self.END_OF_MESSAGE in line:
break
item['body_html'] = item['body_html'] + line
return item
except Exception as ex:
raise ParserError.ZCZCParserError(ex, provider)
def set_item_defaults(self, item):
item['type'] = 'text'
item['urgency'] = '5'
item['pubstatus'] = 'usable'
item['versioncreated'] = utcnow()
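# --- Illustrative example (editor's addition; not part of the original module). ---
# A hypothetical input for this parser: a story framed by ZCZC/NNNN whose header
# lines start with the prefixes mapped above (':' slugline, '=' take key,
# '^' headline, '$' category, '*' format, '+' IPTC subject). All concrete values
# below are invented for illustration only.
#
#   ZCZC
#   :some-slugline
#   =take-1
#   ^Example headline
#   $a
#   *X
#   +04000000
#   Body text of the story...
#   NNNN
#
# Typical use: parser = ZCZCParser(); item = parser.parse_file(path, provider)
# when parser.can_parse(filestr) is True.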
| agpl-3.0 | 6,435,283,647,622,846,000 | 33.434343 | 102 | 0.485186 | false |
CDIPS-AI-2017/pensieve | pensieve/json_dump.py | 1 | 3188 | import json
import os
from datetime import datetime
CONCEPT_MAP = {'people': 'Person',
'places': 'Place',
'things': 'Thing',
'activities': 'Activity',
'times': 'Time',
'mood_words': 'Mood'}
def dump_mem_to_json(mem_dict, save=None):
"""
    Convert mem_dict into a JSON-serializable dict following the schema and
    optionally write it to a JSON file.
Args:
mem_dict: dictionary of mem information
save: path to save JSON to [default: None]
Returns:
mem_json: JSON object for memory
"""
node = {'name': '',
'label': '',
'imageURL': mem_dict.get('img_url', ''),
'iconURL': '',
'created': '',
'updated': ''}
default_mood_weights = {'weight': 0.5,
'joy': 0.0,
'fear': 0.0,
'surprise': 0.0,
'sadness': 0.0,
'disgust': 0.0,
'anger': 0.0}
relation = mem_dict.get('mood_weight', default_mood_weights)
concepts = []
for concept_type, concept_items in mem_dict.items():
if concept_items is None:
continue
if concept_type in ('img_url', 'narrative', 'mood_weight'):
continue
for concept_item in concept_items:
clean_text = concept_item.replace(' ', '_')
clean_text = clean_text.lower()
concept = {'node': {}, 'relation': {}}
concept['node'] = {'concept': CONCEPT_MAP[concept_type],
'name': clean_text,
'label': '',
'iconURL': '',
'imageURL': ''}
concept_relation = 'Has_{}'.format(concept['node']['concept'])
concept['relation'] = {'relation': concept_relation,
'name': '',
'iconURL': '',
'imageURL': '',
'weight': 0.5,
'created': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'updated': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'originType': 'OriginUserDefined',
'joy': 0.0,
'fear': 0.0,
'surprise': 0.0,
'sadness': 0.0,
'disgust': 0.0,
'anger': 0.0}
concepts.append(concept)
narrative = {'node': {'name': '',
'label': 'title',
'text': mem_dict['narrative']},
'relation': {'weight': 0.5}}
mem = {'memory': '',
'node': node,
'relation': relation,
'concepts': concepts,
'narrative': narrative}
if save is not None:
with open(os.path.abspath(save), 'w') as f:
json.dump(mem, f)
return dict(mem)
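# --- Illustrative usage (editor's addition; not part of the original module). ---
# A minimal sketch of the expected mem_dict shape and a call to dump_mem_to_json.
# Every field value below is invented for illustration only.
if __name__ == '__main__':
    example_mem = {
        'img_url': 'http://example.com/photo.jpg',
        'narrative': 'We walked along the beach at sunset.',
        'people': ['Alice'],
        'places': ['the beach'],
        'things': ['camera'],
        'activities': ['walking'],
        'times': ['sunset'],
        'mood_words': ['calm'],
    }
    print(dump_mem_to_json(example_mem, save=None))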
| apache-2.0 | -978,367,820,116,201,100 | 37.409639 | 99 | 0.395859 | false |
minggli/chatbot | chatbot/settings.py | 1 | 6860 | """
settings
a repository to configure various parts of the app
"""
import os
import sys
import json
import configparser
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sys.setrecursionlimit(30000)
def build_config(filename):
"""create a configparser object and write a ini file."""
config = configparser.ConfigParser(allow_no_value=True)
config['GENERAL'] = dict()
config['GENERAL']['ENGINE'] = 'NLTK'
config['GENERAL']['FORCE'] = 'false'
config['GENERAL']['DATA_LOCATION'] = 'cache/'
config['API'] = dict()
config['API']['BASE_URL'] = '/chatbot/api/v1'
config['API']['PORT_ASK'] = '5000'
config['API']['PORT_SYMPTOMS'] = '5001'
config['ENGINE'] = dict()
config['ENGINE']['MAX_WORDS'] = ''
config['ENGINE']['BATCH_SIZE'] = '500'
config['ENGINE']['STATE_SIZE'] = '128'
config['ENGINE']['STEP_SIZE'] = '60'
config['ENGINE']['MAX_STEPS'] = '3000'
config['ENGINE']['VERBOSE'] = 'false'
config['WEBDATA'] = dict()
config['WEBDATA']['BASE_URL'] = 'http://www.nhs.uk'
config['WEBDATA']['META'] = """
{"desc_attributes": {"name": "description"},
"subj_attributes": {"name": "DC.title"},
"article_attributes": {
"start_t_0": "Health A-Z",
"start_t_1": "Home",
"start_t_2": "Create an account",
"end_t_0": "NHS sites",
"end_t_1": "About us",
"end_t_2": "Contact us"}
}"""
config['NLP'] = dict()
config['NLP']['PROCESS'] = """
{"pipeline":
{"pos": true, "stop": true, "lemma": true},
"part_of_speech_exclude":
["ADP", "PUNCT", "DET", "CONJ", "PART", "PRON", "SPACE"]
}"""
config['NLP']['CONTRACTIONS'] = """
{"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he has",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "I would",
"i'd've": "I would have",
"i'll": "I will",
"i'll've": "I will have",
"i'm": "I am",
"i've": "I have",
"isn't": "is not",
"it'd": "it would",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she has",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there had",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
}"""
with open(filename, 'w') as f:
config.write(f)
CONFIGFILE = os.getenv('CONFIGFILE', default='./config.ini')
config = configparser.ConfigParser(allow_no_value=True)
if not os.path.exists(CONFIGFILE):
build_config(CONFIGFILE)
config.read(CONFIGFILE)
ENGINE = os.getenv('ENGINE', default=config['GENERAL']['ENGINE'])
MAX_STEPS = int(os.getenv('STEPS',
default=config.getint('ENGINE', 'MAX_STEPS')))
FORCE = bool(os.getenv('FORCE',
default=config.getboolean('GENERAL', 'FORCE')))
VERBOSE = bool(os.getenv('VERBOSE',
default=config.getboolean('ENGINE', 'VERBOSE')))
WEB_BASE_URL = config['WEBDATA']['BASE_URL']
WEB_METAKEY = json.loads(config['WEBDATA']['META'])
BASE_URL = config['API']['BASE_URL']
PORT_ASK = config['API']['PORT_ASK']
PORT_SYMPTOMS = config['API']['PORT_SYMPTOMS']
MAX_WORDS = int(config['ENGINE']['MAX_WORDS']) \
if config['ENGINE']['MAX_WORDS'] else None
BATCH_SIZE = config.getint('ENGINE', 'BATCH_SIZE')
STATE_SIZE = config.getint('ENGINE', 'STATE_SIZE')
STEP_SIZE = config.getint('ENGINE', 'STEP_SIZE')
NLP_ATTRS = json.loads(config['NLP']['PROCESS'])
NLP_CONTRACTIONS = json.loads(config['NLP']['CONTRACTIONS'])
APP_CONFIG = {
'SECRET_KEY': '\x9c\xedh\xdf\x8dS\r\xe3]\xc3\xd3\xbd\x0488\xfc\xa6<\xfe'
'\x94\xc8\xe0\xc7\xdb',
'SESSION_COOKIE_NAME': 'chatbot_session',
'DEBUG': False
}
class CacheSettings(object):
path = config['GENERAL']['DATA_LOCATION']
index = path + 'index_pages.pkl'
symptoms = path + 'symptoms.pkl'
processed_data = path + 'nlp_data.pkl'
@classmethod
def check(CacheSettings, filename):
return True if os.path.exists(filename) else False
| mit | -400,032,840,587,508,350 | 29.087719 | 76 | 0.529883 | false |
yeiniel/aurora | aurora/webapp/testing.py | 1 | 6684 | # Copyright (c) 2011, Yeiniel Suarez Sosa.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Yeiniel Suarez Sosa. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import unittest
from aurora.webapp import foundation, mapping
__all__ = ['TestHandler', 'TestRule']
class TestHandler(unittest.TestCase):
""" Tests for Web request handlers.
    This test case provides common tests for all Web request handlers. It is
    not a framework test case but a base class for
:class:`Web request handler<.foundation.Handler>` tests.
You need to override the :attr:`handler_factory` class attribute used to
set the Web request handler factory used at test suite setup.
"""
handler_factory = foundation.Handler
request_factory = foundation.Request
def setUp(self):
self.handler = self.handler_factory()
def test_response_type(self):
""" Response object type test.
The :class:`Web response <.foundation.Response>` object returned by
the call to the Web request handler should be an instance of the same
type of the objects returned by calls to the
:class:`Web request <.foundation.Request>`
:attr:`~.foundation.Request.response_factory` attribute.
"""
request = self.request_factory({})
response = self.handler(request)
self.assertEqual(
type(response), type(request.response_factory()), __doc__
)
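# --- Illustrative usage (editor's addition; not part of the original module). ---
# A concrete test case only needs to point handler_factory at the handler under
# test; MyHandler below is a hypothetical class invented for this example.
#
#   class MyHandlerTest(TestHandler):
#       handler_factory = MyHandler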
class TestRule(unittest.TestCase):
""" Tests for Web request path mapping rules.
    This test case provides common tests for all Web request path mapping rules.
    It is not a framework test case but a base class for
:class:`Web request path mapping rule<.mapping.Rule>` tests.
You need to override the :attr:`rule_factory` class attribute
used to set the Web request path mapping rule factory used at test suite
setup.
    The set of tests is permissive: they check whether the
    :class:`Web request path mapping rule<.mapping.Rule>` object
    implements the
    :class:`Web request path mapping rule<.mapping.Rule>` protocol
    but do not fail if the
    :class:`Web request path mapping rule<.mapping.Rule>` object
    provides additional features (an augmented argument list, for example).
"""
rule_factory = mapping.Rule
def setUp(self):
self.rule = self.rule_factory()
def test_match_accept_one_positional_argument(self):
""" Test if the rule `match` method accept one positional argument.
This test call the `match` method of the rule and assert if the
result is `False` or a `Mapping`.
"""
result = self.rule.match('/')
self.assertTrue(
result is False or isinstance(result, collections.Mapping),
__doc__
)
def test_assemble_without_arguments(self):
""" Test if the rule `assemble` method can be called without arguments.
This test call the `assemble` method of the rule and assert if the
result is `False` or a string.
"""
result = self.rule.assemble()
self.assertTrue(result is False or isinstance(result, str), __doc__)
def test_assemble_accept_arbitrary_named_arguments(self):
""" Test if the rule assemble method accept arbitrary named arguments.
This test call the `assemble` method of the rule and assert if the
result is `False` or a string.
"""
data = {}
data.update(map(
lambda item: (item, item),
map(lambda item: chr(item), range(1000))
))
result = self.rule.assemble(**data)
self.assertTrue(result is False or isinstance(result, str), __doc__) | bsd-3-clause | 5,616,323,407,553,251,000 | 41.310127 | 79 | 0.710952 | false |
cdw/d10_workloops | img_ave/mat_and_dir.py | 1 | 1311 | #!/usr/bin/env python
# encoding: utf-8
"""
mat_and_dir.py - reorganize directories and read in .MAT files
Let's be honest here, this is storage for stuff we don't need in
fn_parse_and_sort.
Created by Dave Williams on 2014-11-03
"""
import os
from scipy.io import loadmat
from fn_parse_and_sort import chchdir
## Post processing of dirs
def get_trial_dirs_in_dir(dirname):
"""Return all the directories that are like 'T002'"""
chchdir(dirname)
dirnames = os.walk('.').next()[1]
return filter(lambda d: d.startswith('T'), dirnames)
def tiff_name_parse(fn):
"""Parse out a tiff name to moth, trial, and image number"""
moth = fn.split('-')[0].strip('Moth')
trial = int(fn.split('-')[1].split('_')[0][2:])
im_num = int(fn.split('-')[1].split('_')[1].strip('.tif'))
return moth, trial, im_num
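# Illustrative example (editor's addition): assuming a filename shaped like
# 'Moth22-Tr03_0457.tif' (the name is invented, inferred from the parsing code),
# tiff_name_parse would return ('22', 3, 457).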
## .MAT file processing
def parse_mat(fn):
"""Load data from a .mat file, namely Argonne_2013_mothdata_sansWL.mat"""
mat = loadmat(fn, chars_as_strings=True, struct_as_record=False,
squeeze_me=True)
data = mat.items()[0][-1]
return data
def extract_precession(data):
"""From .mat file, create dict of moths and trial precession values"""
precession = dict([(d.moth_label, map(bool, d.precess)) for d in data])
return precession
| mit | -5,131,101,391,298,932,000 | 28.133333 | 77 | 0.652174 | false |
david-hoffman/scripts | movie_converter.py | 1 | 1698 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# movie_converter.py
"""
Convert from shitty amira format to clean format
Copyright David Hoffman, 2019
"""
import cv2
import glob
import dask
import os
new_ext = "_conv.mpg"
def update_path(path):
return path.replace(".mpg", new_ext)
@dask.delayed
def convert_file(path):
"""Convert movie file with opencv"""
print(path)
# open the video file
cap = cv2.VideoCapture(path)
cap.retrieve()
# set the codec (only one that works here)
fourcc = cv2.VideoWriter_fourcc(*"M1V1")
# begin loop
out = None
while True:
# try and get next frame
ret, frame = cap.read()
if not ret:
break
# initialize for first iteration
if out is None:
# writer expects (width, height) tuple for shape
out = cv2.VideoWriter(update_path(path), fourcc, 25.0, frame.shape[:2][::-1], True)
# write frame
out.write(frame)
    # close objects
    cap.release()
    if out is not None:  # guard against clips that produced no frames
        out.release()
return path
def new_files():
# filter out converted files
paths = filter(lambda x: new_ext not in x, glob.iglob("*.mpg"))
for path in paths:
t_orig = os.path.getmtime(path)
try:
t_conv = os.path.getmtime(update_path(path))
if t_orig > t_conv:
                # converted file is older than original file
yield path
except FileNotFoundError:
# no converted file
yield path
def main():
# convert all files in the folder
print(dask.delayed(list(map(convert_file, new_files()))).compute(scheduler="processes"))
if __name__ == "__main__":
main()
| apache-2.0 | 8,498,430,353,338,681,000 | 22.260274 | 95 | 0.59364 | false |
DeppSRL/open_bilanci | bilanci_project/bilanci/management/commands/context.py | 1 | 12555 | # coding=utf-8
import logging
from optparse import make_option
from pprint import pprint
import re
import numpy
import math
from django.conf import settings
from django.core.management import BaseCommand
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from bilanci.utils.comuni import FLMapper
from territori.models import Territorio, Contesto
from bilanci.utils import couch
class Command(BaseCommand):
couchdb = None
logger = logging.getLogger('management')
comuni_dicts = {}
option_list = BaseCommand.option_list + (
make_option('--years',
dest='years',
default='',
help='Years to fetch. From 2002 to 2012. Use one of this formats: 2012 or 2003-2006 or 2002,2004,2006'),
make_option('--cities',
dest='cities',
default='',
help="""
Cities codes or slugs. Use comma to separate values: Roma,Napoli,Torino or "All".
NOTE: Cities are considered only for set_contesto function
"""),
make_option('--couchdb-server',
dest='couchdb_server',
default=settings.COUCHDB_DEFAULT_SERVER,
help='CouchDB server to connect to (defaults to staging).'),
make_option('--skip-existing',
dest='skip_existing',
action='store_true',
default=False,
help='Skip existing cities. Use to speed up long import of many cities, when errors occur'),
make_option('--dry-run',
dest='dryrun',
action='store_true',
default=False,
help='Set the dry-run command mode: nothing is written in the db'),
make_option('--append',
dest='append',
action='store_true',
default=False,
help='Use the log file appending instead of overwriting (used when launching shell scripts)'),
)
help = """
Compute territorial context data for the Bilanci db:
"""
def clean_data(self,data):
if data:
if data == "N.C.":
return None
else:
# removes the decimals, if any
regex = re.compile("^.+,([\d]{2})$")
matches = regex.findall(data)
if len(matches) > 0:
data = data[:-3]
# removes the thousand-delimiter point and the comma and converts to int
ret = int(data.replace(".","").replace(",",""))
if ret > 10 * 1000 * 1000:
return None
else:
return ret
def get_data(self, territorio, years, key_name):
        # Gets the context data for the provided key_name from the couchdb
        # documents and sanity-checks the numeric values: values that fall too
        # far from the mean (relative to the variance) are discarded.
        # Return value: a list of tuples [(year, value), ...] of accepted values
value_set = []
value_dict = {}
results = []
high_variance_set = False
titoli_possibile_names = [
"quadro-1-dati-generali-al-31-dicembrenotizie-varie",
"quadro-1-dati-generali-al-31-dicembre-notizie-varie",
"quadro-1-dati-generali-al-31-dicembre-1-notizie-varie"
]
# generates bilancio ids
bilancio_ids = ["{0}_{1}".format(year, territorio.cod_finloc) for year in years]
# read data from couch
for bilancio_id in bilancio_ids:
if bilancio_id in self.couchdb:
bilancio_data = self.couchdb[bilancio_id]
if "01" in bilancio_data['consuntivo']:
for titolo_name in titoli_possibile_names:
if titolo_name in bilancio_data["consuntivo"]["01"]:
break
else:
titolo_name = None
if titolo_name:
contesto_couch = bilancio_data["consuntivo"]["01"]\
[titolo_name]["data"]
if key_name in contesto_couch:
clean_data = self.clean_data(contesto_couch[key_name][0])
# clean_data is None if the contesto_data is = "N.C", so I set it for deletion
if clean_data is None:
results.append((bilancio_id[0:4],None))
else:
value_set.append(clean_data)
value_dict[int(bilancio_id[0:4])] = self.clean_data(contesto_couch[key_name][0])
else:
self.logger.warning(u"Titolo 'quadro-1-dati-generali-al-31-dicembre[-]notizie-varie' not found for id:{0}, skipping". format(bilancio_id))
else:
self.logger.warning(u"Quadro '01' Consuntivo not found for id:{0}, skipping".format(bilancio_id))
else:
self.logger.warning(u"Bilancio obj not found for id:{0}, skipping". format(bilancio_id))
if len(value_set) == 0:
self.logger.warning(u"Cannot find data about {0} for city:{1} during the years:{2}".format(key_name, territorio, years))
return
mean = numpy.mean(value_set)
variance = numpy.var(value_set)
        # Coherence check on values.
        # If the ratio of sigma to mean (coefficient of variation) exceeds the
        # 0.1 threshold, values lying more than one standard deviation from the
        # mean are discarded; otherwise all values are kept.
if math.sqrt(variance)/mean > 0.1:
high_variance_set = True
for anno, value in value_dict.iteritems():
if high_variance_set:
if pow((value-mean),2) < variance:
results.append((anno, value))
else:
results.append((anno, None))
else:
results.append((anno, value))
return results
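    # --- Illustrative sketch (editor's addition; not part of the original command). ---
    # The filtering rule used in get_data, shown in isolation: when the
    # coefficient of variation (sigma / mean) exceeds 0.1, values farther than
    # one standard deviation from the mean are mapped to None. The sample
    # numbers below are invented for illustration only.
    #
    #   values = {2003: 1000, 2004: 1010, 2005: 5000}
    #   mean = numpy.mean(list(values.values()))
    #   variance = numpy.var(list(values.values()))
    #   if math.sqrt(variance) / mean > 0.1:
    #       kept = {y: (v if (v - mean) ** 2 < variance else None)
    #               for y, v in values.items()}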
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
###
# dry run
###
dryrun = options['dryrun']
skip_existing = options['skip_existing']
if options['append'] is True:
self.logger = logging.getLogger('management_append')
###
# cities
###
cities_codes = options['cities']
if not cities_codes:
raise Exception("Missing cities parameter")
mapper = FLMapper()
cities = mapper.get_cities(cities_codes)
if cities_codes.lower() != 'all':
self.logger.info("Processing cities: {0}".format(cities))
###
# years
###
years = options['years']
if not years:
raise Exception("Missing years parameter")
if "-" in years:
(start_year, end_year) = years.split("-")
years = range(int(start_year), int(end_year)+1)
else:
years = [int(y.strip()) for y in years.split(",") if 2001 < int(y.strip()) < 2013]
if not years:
raise Exception("No suitable year found in {0}".format(years))
self.logger.info("Processing years: {0}".format(years))
self.years = years
###
# couchdb
###
couchdb_server_alias = options['couchdb_server']
couchdb_dbname = settings.COUCHDB_NORMALIZED_VOCI_NAME
if couchdb_server_alias not in settings.COUCHDB_SERVERS:
self.logger.error(u"Unknown couchdb server alias.")
return
self.logger.info(u"Connecting to db: {0}".format(couchdb_dbname))
self.couchdb = couch.connect(
couchdb_dbname,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
# set contesto and filter out missing territories
missing_territories = []
recalculate_percapita_cities = []
for city in cities:
try:
territorio = Territorio.objects.get(cod_finloc=city)
except ObjectDoesNotExist:
self.logger.warning(u"City {0} not found among territories in DB. Skipping.".format(city))
missing_territories.append(city)
continue
# if skip_existing and the territorio has 1+ contesto then skip territorio
if skip_existing and Contesto.objects.filter(territorio=territorio).count() > 0:
self.logger.info(u"Skip Existing - City:{0} already has context, skipping".format(territorio.denominazione))
continue
self.logger.info(u"Setting context for city: {0}".format(territorio,))
# note: the following keys will not be stored in the db because
# the number format is not constant through the years
#
# "nuclei familiari (n)":"bil_nuclei_familiari",
# "superficie urbana (ha)":"bil_superficie_urbana",
# "superficie totale del comune (ha)":"bil_superficie_totale",
# "lunghezza delle strade esterne (km)":"bil_strade_esterne",
# "lunghezza delle strade interne centro abitato (km)":"bil_strade_interne",
# "di cui: in territorio montano (km)":"bil_strade_montane",
key_name = "popolazione residente (ab.)"
data_results = self.get_data(territorio, years, key_name)
for data_result in data_results:
contesto_pg = None
year, value = data_result
# if value is None it means that the year selected had a value for the key that is not acceptable or wrong
# then if the value for that specific year is already in the db, it has to be deleted
if value is None:
self.logger.warning(u"Deleting wrong value for city:{0} year:{1}".format(territorio.denominazione, year))
_ = Contesto.objects.filter(
anno = year,
territorio = territorio,
).delete()
if territorio not in recalculate_percapita_cities:
recalculate_percapita_cities.append(territorio)
continue
# if the contesto data is not present, inserts the data in the db
# otherwise skips
try:
contesto_pg = Contesto.objects.get(
anno = year,
territorio = territorio,
)
except ObjectDoesNotExist:
contesto_pg = Contesto()
pass
# write data on postgres
if dryrun is False:
contesto_pg.bil_popolazione_residente = value
self.logger.debug(u"year:{0}, value:{1}".format(year, value,))
contesto_pg.territorio = territorio
contesto_pg.anno = year
contesto_pg.save()
if len(missing_territories)>0:
self.logger.error(u"Following cities could not be found in Territori DB and could not be processed:")
for missing_city in missing_territories:
self.logger.error("{0}".format(missing_city))
percapita_cmd_string = u"python manage.py percapita -v2 --years={0} --cities=".format(options['years'])
if len(recalculate_percapita_cities)>0:
self.logger.error(u"Following cities had at least one wrong context data and percapita should be recalculated with this command:")
for missing_city in recalculate_percapita_cities:
numeric_codfinloc = missing_city.cod_finloc.split("--")[1]
percapita_cmd_string+=numeric_codfinloc+","
self.logger.error(percapita_cmd_string[:-1]) | mit | 1,785,778,014,623,933,000 | 37.048485 | 162 | 0.542174 | false |
dtnaylor/web-profiler | webloader/curl_loader.py | 1 | 4321 | import os
import logging
import traceback
import shlex
import subprocess
import string
from collections import defaultdict
from loader import Loader, LoadResult, Timeout, TimeoutError
CURL = '/usr/bin/env curl'
class CurlLoader(Loader):
'''Subclass of :class:`Loader` that loads pages using curl.
.. note:: The :class:`CurlLoader` currently does not support HTTP2.
.. note:: The :class:`CurlLoader` currently does not support caching.
.. note:: The :class:`CurlLoader` currently does not support full page loading (i.e., fetching a page's subresources).
.. note:: The :class:`CurlLoader` currently does not support saving HARs.
.. note:: The :class:`CurlLoader` currently does not support saving screenshots.
.. note:: The :class:`CurlLoader` currently does not support saving content.
'''
def __init__(self, **kwargs):
super(CurlLoader, self).__init__(**kwargs)
if self._http2:
raise NotImplementedError('CurlLoader does not support HTTP2')
if not self._disable_local_cache:
raise NotImplementedError('CurlLoader does not support local caching')
if self._full_page:
raise NotImplementedError('CurlLoader does not support loading a full page')
if self._save_har:
raise NotImplementedError('CurlLoader does not support saving HARs')
if self._save_screenshot:
            raise NotImplementedError('CurlLoader does not support saving screenshots')
if self._delay_after_onload != 0:
raise NotImplementedError('CurlLoader does not support delay after onload')
if self._save_content != 'never':
raise NotImplementedError('CurlLoader does not support saving content')
self._image_paths_by_url = defaultdict(list)
def _load_page(self, url, outdir, trial_num=-1):
# load the specified URL
logging.info('Loading page: %s', url)
try:
# prepare the curl command
curl_cmd = CURL
curl_cmd += ' -s -S' # don't show progress meter
curl_cmd += ' -L' # follow redirects
curl_cmd += ' -o /dev/null' # don't print file to stdout
curl_cmd += ' -w http_code=%{http_code};final_url=%{url_effective};time=%{time_total};size=%{size_download}' # format for stats at end
curl_cmd += ' --connect-timeout %i' % self._timeout # TCP connect timeout
if self._disable_network_cache:
curl_cmd += ' --header "Cache-Control: max-age=0"' # disable network caches
if self._user_agent:
curl_cmd += ' --user-agent "%s"' % self._user_agent # custom user agent
curl_cmd += ' %s' % url
# load the page
logging.debug('Running curl: %s', curl_cmd)
with Timeout(seconds=self._timeout+5):
output = subprocess.check_output(shlex.split(curl_cmd))
logging.debug('curl returned: %s', output.strip())
# curl returned, but may or may not have succeeded
returnvals = {field.split('=')[0]: field.split('=')[1] for field in output.split('\n')[-1].split(';')}
if returnvals['http_code'] != '200':
return LoadResult(LoadResult.FAILURE_NO_200, url)
else:
# Report status and time
return LoadResult(LoadResult.SUCCESS,
url,
final_url=returnvals['final_url'],
time=float(string.replace(returnvals['time'], ',', '.')),
size=returnvals['size'])
# problem running curl
except TimeoutError:
logging.exception('Timeout fetching %s', url)
return LoadResult(LoadResult.FAILURE_TIMEOUT, url)
except subprocess.CalledProcessError as e:
logging.exception('Error loading %s: %s\n%s' % (url, e, e.output))
if e.returncode == 28:
return LoadResult(LoadResult.FAILURE_TIMEOUT, url)
else:
return LoadResult(LoadResult.FAILURE_UNKNOWN, url)
except Exception as e:
logging.exception('Error loading %s: %s\n%s' % (url, e, traceback.format_exc()))
return LoadResult(LoadResult.FAILURE_UNKNOWN, url)
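# --- Illustrative usage (editor's addition; not part of the original module). ---
# A hypothetical invocation; the constructor keyword names are assumptions about
# the Loader base class and may differ in the real code. On success the returned
# LoadResult carries the final URL, total time and download size (see above):
#
#   loader = CurlLoader(disable_local_cache=True, save_content='never')
#   result = loader._load_page('http://example.com', '/tmp/outdir')
#   print(result)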
| mit | 2,313,424,952,162,075,000 | 45.462366 | 148 | 0.601481 | false |
rwl/PyCIM | CIM15/IEC61970/Informative/InfAssets/SubstationAsset.py | 1 | 1963 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Assets.Asset import Asset
class SubstationAsset(Asset):
"""Substation asset.Substation asset.
"""
def __init__(self, function="transmission", *args, **kw_args):
"""Initialises a new 'SubstationAsset' instance.
@param function: Function of this substation asset. Values are: "transmission", "distribution", "other", "generation", "subTransmission", "industrial"
"""
#: Function of this substation asset. Values are: "transmission", "distribution", "other", "generation", "subTransmission", "industrial"
self.function = function
super(SubstationAsset, self).__init__(*args, **kw_args)
_attrs = ["function"]
_attr_types = {"function": str}
_defaults = {"function": "transmission"}
_enums = {"function": "SubstationFunctionKind"}
_refs = []
_many_refs = []
| mit | 3,252,123,477,085,998,600 | 44.651163 | 158 | 0.720326 | false |
openstack/ironic | ironic/drivers/modules/fake.py | 1 | 12056 | # -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake driver interfaces used in testing.
This is also an example of some kinds of things which can be done within
drivers. For instance, the MultipleVendorInterface class demonstrates how to
load more than one interface and wrap them in some logic to route incoming
vendor_passthru requests appropriately. This can be useful eg. when mixing
functionality between a power interface and a deploy interface, when both rely
on separate vendor_passthru methods.
"""
from oslo_log import log
from ironic.common import boot_devices
from ironic.common import components
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import indicator_states
from ironic.common import states
from ironic.drivers import base
from ironic import objects
LOG = log.getLogger(__name__)
class FakePower(base.PowerInterface):
"""Example implementation of a simple power interface."""
def get_properties(self):
return {}
def validate(self, task):
pass
def get_power_state(self, task):
return task.node.power_state
def reboot(self, task, timeout=None):
pass
def set_power_state(self, task, power_state, timeout=None):
if power_state not in [states.POWER_ON, states.POWER_OFF,
states.SOFT_REBOOT, states.SOFT_POWER_OFF]:
raise exception.InvalidParameterValue(
_("set_power_state called with an invalid power "
"state: %s.") % power_state)
task.node.power_state = power_state
def get_supported_power_states(self, task):
return [states.POWER_ON, states.POWER_OFF, states.REBOOT,
states.SOFT_REBOOT, states.SOFT_POWER_OFF]
class FakeBoot(base.BootInterface):
"""Example implementation of a simple boot interface."""
# NOTE(TheJulia): default capabilities to make unit tests
# happy with the fake boot interface.
capabilities = ['ipxe_boot', 'pxe_boot']
def get_properties(self):
return {}
def validate(self, task):
pass
def prepare_ramdisk(self, task, ramdisk_params, mode='deploy'):
pass
def clean_up_ramdisk(self, task, mode='deploy'):
pass
def prepare_instance(self, task):
pass
def clean_up_instance(self, task):
pass
class FakeDeploy(base.DeployInterface):
"""Class for a fake deployment driver.
Example implementation of a deploy interface that uses a
separate power interface.
"""
def get_properties(self):
return {}
def validate(self, task):
pass
@base.deploy_step(priority=100)
def deploy(self, task):
return None
def tear_down(self, task):
return states.DELETED
def prepare(self, task):
pass
def clean_up(self, task):
pass
def take_over(self, task):
pass
class FakeVendorA(base.VendorInterface):
"""Example implementation of a vendor passthru interface."""
def get_properties(self):
return {'A1': 'A1 description. Required.',
'A2': 'A2 description. Optional.'}
def validate(self, task, method, **kwargs):
if method == 'first_method':
bar = kwargs.get('bar')
if not bar:
raise exception.MissingParameterValue(_(
"Parameter 'bar' not passed to method 'first_method'."))
@base.passthru(['POST'],
description=_("Test if the value of bar is baz"))
def first_method(self, task, http_method, bar):
return True if bar == 'baz' else False
class FakeVendorB(base.VendorInterface):
"""Example implementation of a secondary vendor passthru."""
def get_properties(self):
return {'B1': 'B1 description. Required.',
'B2': 'B2 description. Required.'}
def validate(self, task, method, **kwargs):
if method in ('second_method', 'third_method_sync',
'fourth_method_shared_lock'):
bar = kwargs.get('bar')
if not bar:
raise exception.MissingParameterValue(_(
"Parameter 'bar' not passed to method '%s'.") % method)
@base.passthru(['POST'],
description=_("Test if the value of bar is kazoo"))
def second_method(self, task, http_method, bar):
return True if bar == 'kazoo' else False
@base.passthru(['POST'], async_call=False,
description=_("Test if the value of bar is meow"))
def third_method_sync(self, task, http_method, bar):
return True if bar == 'meow' else False
@base.passthru(['POST'], require_exclusive_lock=False,
description=_("Test if the value of bar is woof"))
def fourth_method_shared_lock(self, task, http_method, bar):
return True if bar == 'woof' else False
class FakeConsole(base.ConsoleInterface):
"""Example implementation of a simple console interface."""
def get_properties(self):
return {}
def validate(self, task):
pass
def start_console(self, task):
pass
def stop_console(self, task):
pass
def get_console(self, task):
return {}
class FakeManagement(base.ManagementInterface):
"""Example implementation of a simple management interface."""
def get_properties(self):
return {}
def validate(self, task):
# TODO(dtantsur): remove when snmp hardware type no longer supports the
# fake management.
if task.node.driver == 'snmp':
LOG.warning('Using "fake" management with "snmp" hardware type '
'is deprecated, use "noop" instead for node %s',
task.node.uuid)
def get_supported_boot_devices(self, task):
return [boot_devices.PXE]
def set_boot_device(self, task, device, persistent=False):
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
def get_boot_device(self, task):
return {'boot_device': boot_devices.PXE, 'persistent': False}
def get_sensors_data(self, task):
return {}
def get_supported_indicators(self, task, component=None):
indicators = {
components.CHASSIS: {
'led-0': {
"readonly": True,
"states": [
indicator_states.OFF,
indicator_states.ON
]
}
},
components.SYSTEM: {
'led': {
"readonly": False,
"states": [
indicator_states.BLINKING,
indicator_states.OFF,
indicator_states.ON
]
}
}
}
return {c: indicators[c] for c in indicators
if not component or component == c}
def get_indicator_state(self, task, component, indicator):
indicators = self.get_supported_indicators(task)
if component not in indicators:
raise exception.InvalidParameterValue(_(
"Invalid component %s specified.") % component)
if indicator not in indicators[component]:
raise exception.InvalidParameterValue(_(
"Invalid indicator %s specified.") % indicator)
return indicator_states.ON
class FakeInspect(base.InspectInterface):
"""Example implementation of a simple inspect interface."""
def get_properties(self):
return {}
def validate(self, task):
pass
def inspect_hardware(self, task):
return states.MANAGEABLE
class FakeRAID(base.RAIDInterface):
"""Example implementation of simple RAIDInterface."""
def get_properties(self):
return {}
def create_configuration(self, task, create_root_volume=True,
create_nonroot_volumes=True):
pass
def delete_configuration(self, task):
pass
class FakeBIOS(base.BIOSInterface):
"""Fake implementation of simple BIOSInterface."""
def get_properties(self):
return {}
def validate(self, task):
pass
@base.clean_step(priority=0, argsinfo={
'settings': {'description': ('List of BIOS settings, each item needs '
'to contain a dictionary with name/value pairs'),
'required': True}})
def apply_configuration(self, task, settings):
        # Note: the implementation of apply_configuration in the fake interface
        # is just for testing purposes; for a real driver implementation, please
        # refer to the developer doc at https://docs.openstack.org/ironic/latest/
# contributor/bios_develop.html.
node_id = task.node.id
create_list, update_list, delete_list, nochange_list = (
objects.BIOSSettingList.sync_node_setting(task.context, node_id,
settings))
if len(create_list) > 0:
objects.BIOSSettingList.create(task.context, node_id, create_list)
if len(update_list) > 0:
objects.BIOSSettingList.save(task.context, node_id, update_list)
if len(delete_list) > 0:
delete_names = [setting['name'] for setting in delete_list]
objects.BIOSSettingList.delete(task.context, node_id,
delete_names)
# nochange_list is part of return of sync_node_setting and it might be
# useful to the drivers to give a message if no change is required
# during application of settings.
if len(nochange_list) > 0:
pass
@base.clean_step(priority=0)
def factory_reset(self, task):
        # Note: the implementation of factory_reset in the fake interface is
        # just for testing purposes; for a real driver implementation, please
        # refer to the developer doc at https://docs.openstack.org/ironic/latest/
# contributor/bios_develop.html.
node_id = task.node.id
setting_objs = objects.BIOSSettingList.get_by_node_id(
task.context, node_id)
for setting in setting_objs:
objects.BIOSSetting.delete(task.context, node_id, setting.name)
@base.clean_step(priority=0)
def cache_bios_settings(self, task):
        # Note: the implementation of cache_bios_settings in the fake interface
        # is just for testing purposes; for a real driver implementation, please
        # refer to the developer doc at https://docs.openstack.org/ironic/latest/
# contributor/bios_develop.html.
pass
class FakeStorage(base.StorageInterface):
"""Example implementation of simple storage Interface."""
def validate(self, task):
pass
def get_properties(self):
return {}
def attach_volumes(self, task):
pass
def detach_volumes(self, task):
pass
def should_write_image(self, task):
return True
class FakeRescue(base.RescueInterface):
"""Example implementation of a simple rescue interface."""
def get_properties(self):
return {}
def validate(self, task):
pass
def rescue(self, task):
return states.RESCUE
def unrescue(self, task):
return states.ACTIVE
| apache-2.0 | 637,479,506,789,311,900 | 30.560209 | 79 | 0.616871 | false |
jbogaardt/chainladder-python | examples/plot_voting_chainladder.py | 1 | 1193 | """
==========================
Voting Chainladder Example
==========================
This example demonstrates how you can use the Voting Chainladder method.
"""
import numpy as np
import pandas as pd
import chainladder as cl
# Load the data
raa = cl.load_sample('raa')
cl_ult = cl.Chainladder().fit(raa).ultimate_ # Chainladder Ultimate
apriori = cl_ult * 0 + (float(cl_ult.sum()) / 10) # Mean Chainladder Ultimate
# Load estimators to vote between
bcl = cl.Chainladder()
cc = cl.CapeCod()
estimators = [('bcl', bcl), ('cc', cc)]
# Fit VotingChainladder using CC after 1987 and a blend of BCL and CC otherwise
vot = cl.VotingChainladder(
estimators=estimators,
weights=lambda origin: (0, 1) if origin.year > 1987 else (0.5, 0.5)
)
vot.fit(raa, sample_weight=apriori)
# Plotting
bcl_ibnr = bcl.fit(raa).ibnr_.to_frame()
cc_ibnr = cc.fit(raa, sample_weight=apriori).ibnr_.to_frame()
vot_ibnr = vot.ibnr_.to_frame()
plot_ibnr = pd.concat([bcl_ibnr, vot_ibnr, cc_ibnr], axis=1)
plot_ibnr.columns = ['BCL', 'Voting', 'CC']
g = plot_ibnr.plot(
kind='bar', ylim=(0, None), grid=True,
title='Voting Chainladder IBNR').set(
xlabel='Accident Year', ylabel='Loss');
| mit | -1,812,801,947,728,713,000 | 28.825 | 79 | 0.658005 | false |
mitodl/ccxcon | courses/models_test.py | 1 | 2788 | """
Tests for Models
"""
import json
from django.test import TestCase
from django.contrib.auth.models import User
from .factories import CourseFactory, ModuleFactory
from courses.models import Course, Module, UserInfo
# pylint: disable=no-self-use
class CourseTests(TestCase):
"""
Tests for Course
"""
def test_tostring(self):
"""
Test behavior of str(Course)
"""
assert str(Course(title='test')) == 'test'
def test_towebhook(self):
"""
test to_webhook implementation returns valid json object
"""
course = CourseFactory.create()
out = course.to_webhook()
json.dumps(out) # Test to ensure it's json dumpable.
ex_pk = out['external_pk']
assert out['instance'] == course.edx_instance.instance_url
assert out['course_id'] == course.course_id
assert out['author_name'] == course.author_name
assert out['overview'] == course.overview
assert out['description'] == course.description
assert out['image_url'] == course.image_url
assert out['instructors'] == [str(instructor) for instructor in course.instructors.all()]
assert isinstance(ex_pk, str)
assert '-' in ex_pk
class ModuleTests(TestCase):
"""
Tests for Module
"""
def test_tostring(self):
"""
Test behavior of str(Module)
"""
assert str(Module(title='test')) == 'test'
def test_ordering(self):
"""
Test module ordering is by course/order.
"""
c1 = CourseFactory.create()
c2 = CourseFactory.create()
        # Intentionally not created in course order so we can validate it's
# not by id.
m10 = ModuleFactory.create(course=c1, order=0)
m21 = ModuleFactory.create(course=c2, order=1)
m20 = ModuleFactory.create(course=c2, order=0)
m11 = ModuleFactory.create(course=c1, order=1)
result = [x.id for x in Module.objects.all()]
assert result == [m10.id, m11.id, m20.id, m21.id]
def test_towebhook(self):
"""
test to_webhook implementation returns valid json object
"""
module = ModuleFactory.build()
web_out = module.to_webhook()
json.dumps(web_out) # Test to ensure it's json dumpable.
assert web_out['instance'] == module.course.edx_instance.instance_url
for k in ('external_pk', 'course_external_pk'):
assert isinstance(web_out[k], str)
assert '-' in web_out[k]
class UserInfoTests(TestCase):
"""
Tests for UserInfo
"""
def test_tostring(self):
"""
Test behavior of str(UserInfo)
"""
assert str(UserInfo(user=User(username='test'))) == 'Profile for test'
| agpl-3.0 | 3,154,952,932,236,635,600 | 29.977778 | 97 | 0.602582 | false |
brianmckenna/sci-wms | wmsrest/views.py | 1 | 4583 | # -*- coding: utf-8 -*-
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from wms.models import Dataset, Layer, VirtualLayer, Variable
from wmsrest.serializers import DatasetSerializer, SGridDatasetSerializer, UGridDatasetSerializer, RGridDatasetSerializer, LayerSerializer, VirtualLayerSerializer, VariableSerializer
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from django.http import Http404
class DatasetList(APIView):
"""
List all datasets, or create a new dataset.
"""
def get(self, request, format=None):
snippets = Dataset.objects.select_related().all()
serializer = DatasetSerializer(snippets, many=True)
return Response(serializer.data)
def post(self, request, format=None):
if 'ugrid' in request.data['type']:
request.data['type'] = 'wms.ugriddataset'
serializer = UGridDatasetSerializer(data=request.data)
elif 'sgrid' in request.data['type']:
request.data['type'] = 'wms.sgriddataset'
serializer = SGridDatasetSerializer(data=request.data)
elif 'rgrid' in request.data['type']:
request.data['type'] = 'wms.rgriddataset'
serializer = RGridDatasetSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class DatasetDetail(APIView):
"""
Get or update a specific Sci-WMS dataset.
Supports GET, PUT, DELETE, and PATCH methods.
A DELETE on a dataset with a defined m2m relationship
to another table will also delete that relationship.
    PUT and PATCH requests with defined m2m relations
to another table will be updated accordingly.
"""
permission_classes = (IsAuthenticatedOrReadOnly,)
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
def get_object(self, pk):
try:
return Dataset.objects.get(pk=pk)
except Dataset.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
dataset = self.get_object(pk)
serializer = DatasetSerializer(dataset)
return Response(serializer.data)
def put(self, request, pk, format=None):
dataset = self.get_object(pk)
if 'ugrid' in request.data['type']:
request.data['type'] = 'wms.ugriddataset'
serializer = UGridDatasetSerializer(dataset, data=request.data)
elif 'sgrid' in request.data['type']:
request.data['type'] = 'wms.sgriddataset'
serializer = SGridDatasetSerializer(dataset, data=request.data)
elif 'rgrid' in request.data['type']:
request.data['type'] = 'wms.rgriddataset'
serializer = RGridDatasetSerializer(dataset, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
dataset = self.get_object(pk)
dataset.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class LayerDetail(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = LayerSerializer
queryset = Layer.objects.all()
class VirtuallLayerDetail(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = VirtualLayerSerializer
queryset = VirtualLayer.objects.all()
class DefaultDetail(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = VariableSerializer
queryset = Variable.objects.all()
class DefaultList(APIView):
"""
    List all default variables, or create a new one.
"""
def get(self, request, format=None):
snippets = Variable.objects.all()
serializer = VariableSerializer(snippets, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = VariableSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| gpl-3.0 | -4,777,197,042,386,780,000 | 36.876033 | 182 | 0.690814 | false |
uclapi/uclapi | backend/uclapi/timetable/migrations/0010_auto_20190220_1835.py | 1 | 3406 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-20 18:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetable', '0009_coursea_courseb'),
]
operations = [
migrations.AlterField(
model_name='modulegroupsa',
name='csize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='estsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='groupnum',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='maxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='mequivid',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='minsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='parentkey',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='prefmaxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='thiskey',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='csize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='estsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='groupnum',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='maxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='mequivid',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='minsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='parentkey',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='prefmaxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='thiskey',
field=models.IntegerField(blank=True, null=True),
),
]
| mit | 4,278,143,463,914,907,600 | 31.438095 | 61 | 0.551674 | false |
hanlind/nova | nova/conf/cells.py | 1 | 15948 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cells_group = cfg.OptGroup('cells',
title='Cells Options',
help="""
Cells options allow you to use cells functionality in openstack
deployment.
""")
cells_opts = [
cfg.StrOpt('topic',
default='cells',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
Configurable RPC topics provide little value and can result in a wide variety
of errors. They should not be used.
""",
help="""
Topic.
This is the message queue topic that cells nodes listen on. It is
used when the cells service is started up to configure the queue,
and whenever an RPC call to the scheduler is made.
Possible values:
* cells: This is the recommended and the default value.
"""),
cfg.BoolOpt('enable',
default=False,
help="""
Enable cell functionality.
When this functionality is enabled, it lets you to scale an OpenStack
Compute cloud in a more distributed fashion without having to use
complicated technologies like database and message queue clustering.
Cells are configured as a tree. The top-level cell should have a host
that runs a nova-api service, but no nova-compute services. Each
child cell should run all of the typical nova-* services in a regular
Compute cloud except for nova-api. You can think of cells as a normal
Compute deployment in that each cell has its own database server and
message queue broker.
Related options:
* name: A unique cell name must be given when this functionality
is enabled.
* cell_type: Cell type should be defined for all cells.
"""),
cfg.StrOpt('name',
default='nova',
help="""
Name of the current cell.
This value must be unique for each cell. Name of a cell is used as
its id; leaving this option unset or setting the same name for
two or more cells may cause unexpected behaviour.
Related options:
* enabled: This option is meaningful only when cells service
is enabled
"""),
cfg.ListOpt('capabilities',
default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
help="""
Cell capabilities.
List of arbitrary key=value pairs defining capabilities of the
current cell to be sent to the parent cells. These capabilities
are intended to be used in cells scheduler filters/weighers.
Possible values:
* key=value pairs list for example;
``hypervisor=xenserver;kvm,os=linux;windows``
"""),
cfg.IntOpt('call_timeout',
default=60,
min=0,
help="""
Call timeout.
Cell messaging module waits for response(s) to be put into the
eventlet queue. This option defines the seconds waited for
response from a call to a cell.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('reserve_percent',
default=10.0,
help="""
Reserve percentage
Percentage of cell capacity to hold in reserve, so the minimum
amount of free resource is considered to be;
min_free = total * (reserve_percent / 100.0)
This option affects both memory and disk utilization.
The primary purpose of this reserve is to ensure some space is
available for users who want to resize their instance to be larger.
Note that currently once the capacity expands into this reserve
space this option is ignored.
Possible values:
* An integer or float, corresponding to the percentage of cell capacity to
be held in reserve.
"""),
cfg.StrOpt('cell_type',
default='compute',
choices=('api', 'compute'),
help="""
Type of cell.
When cells feature is enabled the hosts in the OpenStack Compute
cloud are partitioned into groups. Cells are configured as a tree.
The top-level cell's cell_type must be set to ``api``. All other
cells are defined as a ``compute cell`` by default.
Related option:
* quota_driver: Disable quota checking for the child cells.
(nova.quota.NoopQuotaDriver)
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('mute_child_interval',
default=300,
help="""
Mute child interval.
Number of seconds after which a child cell that has not sent any
capability and capacity updates is treated as a mute cell. A mute
child cell is then weighted so that it is highly recommended to be
skipped.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('bandwidth_update_interval',
default=600,
help="""
Bandwidth update interval.
Seconds between bandwidth usage cache updates for cells.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('instance_update_sync_database_limit',
default=100,
help="""
Instance update sync database limit.
Number of instances to pull from the database at one time for
a sync. If there are more instances to update the results will
be paged through.
Possible values:
* An integer, corresponding to a number of instances.
"""),
]
mute_weigher_opts = [
# TODO(sfinucan): Add max parameter
cfg.FloatOpt('mute_weight_multiplier',
default=-10000.0,
help="""
Mute weight multiplier.
Multiplier used to weigh mute children. Mute children cells are
recommended to be skipped so their weight is multiplied by this
negative value.
Possible values:
* Negative numeric number
"""),
]
ram_weigher_opts = [
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('ram_weight_multiplier',
default=10.0,
help="""
Ram weight multiplier.
Multiplier used for weighing ram. Negative numbers indicate that
Compute should stack VMs on one host instead of spreading out new
VMs to more hosts in the cell.
Possible values:
* Numeric multiplier
"""),
]
weigher_opts = [
# TODO(sfinucan): Add min parameter
cfg.FloatOpt('offset_weight_multiplier',
default=1.0,
help="""
Offset weight multiplier
Multiplier used to weigh offset weigher. Cells with higher
weight_offsets in the DB will be preferred. The weight_offset
is a property of a cell stored in the database. It can be used
by a deployer to have scheduling decisions favor or disfavor
cells based on the setting.
Possible values:
* Numeric multiplier
"""),
]
cell_manager_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('instance_updated_at_threshold',
default=3600,
help="""
Instance updated at threshold
Number of seconds after an instance was updated or deleted to
continue to update cells. This option lets the cells manager only
attempt to sync instances that have been updated recently.
i.e., a threshold of 3600 means to only update instances that
have been modified in the last hour.
Possible values:
* Threshold in seconds
Related options:
* This value is used with the ``instance_update_num_instances``
value in a periodic task run.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt("instance_update_num_instances",
default=1,
help="""
Instance update num instances
On every run of the periodic task, nova cells manager will attempt to
sync instance_update_num_instances number of instances. When the
manager gets the list of instances, it shuffles them so that multiple
nova-cells services do not attempt to sync the same instances in
lockstep.
Possible values:
* Positive integer number
Related options:
* This value is used with the ``instance_updated_at_threshold``
value in a periodic task run.
""")
]
cell_messaging_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('max_hop_count',
default=10,
help="""
Maximum hop count
When processing a targeted message, if the local cell is not the
target, a route is defined between neighbouring cells, and the
message is processed across the whole routing path. This option
defines the maximum hop count until reaching the target.
Possible values:
* Positive integer value
"""),
cfg.StrOpt('scheduler',
default='nova.cells.scheduler.CellsScheduler',
help="""
Cells scheduler.
The class of the driver used by the cells scheduler. This should be
the full Python path to the class to be used. If nothing is specified
in this option, the CellsScheduler is used.
""")
]
cell_rpc_driver_opts = [
cfg.StrOpt('rpc_driver_queue_base',
default='cells.intercell',
help="""
RPC driver queue base.
When sending a message to another cell by JSON-ifying the message
and making an RPC cast to 'process_message', a base queue is used.
This option defines the base queue name to be used when communicating
between cells. Various topics by message type will be appended to this.
Possible values:
* The base queue name to be used when communicating between cells.
""")
]
cell_scheduler_opts = [
cfg.ListOpt('scheduler_filter_classes',
default=['nova.cells.filters.all_filters'],
help="""
Scheduler filter classes.
Filter classes the cells scheduler should use. An entry of
"nova.cells.filters.all_filters" maps to all cells filters
included with nova. As of the Mitaka release the following
filter classes are available:
Different cell filter: A scheduler hint of 'different_cell'
with a value of a full cell name may be specified to route
a build away from a particular cell.
Image properties filter: Image metadata named
'hypervisor_version_requires' with a version specification
may be specified to ensure the build goes to a cell which
has hypervisors of the required version. If either the version
requirement on the image or the hypervisor capability of the
cell is not present, this filter returns without filtering out
the cells.
Target cell filter: A scheduler hint of 'target_cell' with a
value of a full cell name may be specified to route a build to
a particular cell. No error handling is done as there's no way
to know whether the full path is valid.
As an admin user, you can also add a filter that directs builds
to a particular cell.
"""),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.cells.weights.all_weighers'],
help="""
Scheduler weight classes.
Weigher classes the cells scheduler should use. An entry of
"nova.cells.weights.all_weighers" maps to all cell weighers
included with nova. As of the Mitaka release the following
weight classes are available:
mute_child: Downgrades the likelihood of child cells being
chosen for scheduling requests, which haven't sent capacity
or capability updates in a while. Options include
mute_weight_multiplier (multiplier for mute children; value
should be negative).
ram_by_instance_type: Select cells with the most RAM capacity
for the instance type being requested. Because higher weights
win, Compute returns the number of available units for the
instance type requested. The ram_weight_multiplier option defaults
to 10.0 that adds to the weight by a factor of 10. Use a negative
number to stack VMs on one host instead of spreading out new VMs
to more hosts in the cell.
weight_offset: Allows modifying the database to weight a particular
cell. The highest weight will be the first cell to be scheduled for
launching an instance. When the weight_offset of a cell is set to 0,
it is unlikely to be picked but it could be picked if other cells
have a lower weight, like if they're full. And when the weight_offset
is set to a very high value (for example, '999999999999999'), it is
likely to be picked if no other cell has a higher weight.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('scheduler_retries',
default=10,
help="""
Scheduler retries.
How many retries when no cells are available. Specifies how many
times the scheduler tries to launch a new instance when no cells
are available.
Possible values:
* Positive integer value
Related options:
* This value is used with the ``scheduler_retry_delay`` value
while retrying to find a suitable cell.
"""),
# TODO(sfinucan): Add min parameter
cfg.IntOpt('scheduler_retry_delay',
default=2,
help="""
Scheduler retry delay.
Specifies the delay (in seconds) between scheduling retries when no
cell can be found to place the new instance on. When the instance
could not be scheduled to a cell after ``scheduler_retries`` in
combination with ``scheduler_retry_delay``, then the scheduling
of the instance failed.
Possible values:
* Time in seconds.
Related options:
* This value is used with the ``scheduler_retries`` value
while retrying to find a suitable cell.
""")
]
cell_state_manager_opts = [
# TODO(sfinucan): Add min parameter
cfg.IntOpt('db_check_interval',
default=60,
help="""
DB check interval.
Cell state manager updates cell status for all cells from the DB
only after this particular interval time is passed. Otherwise cached
status are used. If this value is 0 or negative all cell status are
updated from the DB whenever a state is needed.
Possible values:
* Interval time, in seconds.
"""),
cfg.StrOpt('cells_config',
help="""
Optional cells configuration.
Configuration file from which to read cells configuration. If given,
overrides reading cells from the database.
Cells store all inter-cell communication data, including user names
and passwords, in the database. Because the cells data is not updated
very frequently, use this option to specify a JSON file to store
cells data. With this configuration, the database is no longer
consulted when reloading the cells data. The file must have columns
present in the Cell model (excluding common database fields and the
id column). You must specify the queue connection information through
a transport_url field, instead of username, password, and so on.
The transport_url has the following form:
rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
Possible values:
The scheme can be either qpid or rabbit, the following sample shows
this optional configuration:
{
"parent": {
"name": "parent",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": true
},
"cell1": {
"name": "cell1",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit1.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": false
},
"cell2": {
"name": "cell2",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit2.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": false
}
}
""")
]
ALL_CELLS_OPTS = (cells_opts +
mute_weigher_opts +
ram_weigher_opts +
weigher_opts +
cell_manager_opts +
cell_messaging_opts +
cell_rpc_driver_opts +
cell_scheduler_opts +
cell_state_manager_opts)
def register_opts(conf):
conf.register_group(cells_group)
conf.register_opts(ALL_CELLS_OPTS, group=cells_group)
def list_opts():
return {cells_group: ALL_CELLS_OPTS}
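
# --- Hedged usage sketch (added for illustration; not part of nova itself) ---
# The options documented above are normally consumed through nova's global
# CONF object. The block below only shows how this module's register_opts()
# helper is meant to be used with a bare oslo.config ConfigOpts instance and
# prints a few of the documented defaults; the values are taken from the help
# strings above, not deployment advice.
if __name__ == "__main__":
    example_conf = cfg.ConfigOpts()
    register_opts(example_conf)
    example_conf([])  # parse an empty argument list so defaults become readable
    print(example_conf.cells.enable, example_conf.cells.name,
          example_conf.cells.cell_type, example_conf.cells.reserve_percent)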
| apache-2.0 | 7,724,184,318,643,564,000 | 29.319392 | 78 | 0.713632 | false |
AlexanderSavelyev/rdkit | rdkit/Chem/test_list.py | 1 | 2106 |
tests=[
("python","UnitTestChem.py",{}),
("python","UnitTestChemv2.py",{}),
("python","UnitTestChemAtom.py",{}),
("python","UnitTestChemBond.py",{}),
("python","UnitTestChemSmarts.py",{}),
("python","UnitTestFragmentDescriptors.py",{}),
("python","UnitTestGraphDescriptors.2.py",{}),
("python","UnitTestLipinski.py",{}),
("python","MCS.py",{}),
("python","UnitTestMCS.py",{}),
("python","UnitTestOldBugs.py",{}),
("python","UnitTestSATIS.py",{}),
("python","UnitTestSmiles.py",{}),
("python","UnitTestSuppliers.py",{}),
("python","UnitTestSurf.py",{}),
("python","UnitTestMol3D.py",{}),
("python","FragmentMatcher.py",{}),
("python","MACCSkeys.py",{}),
("python","Descriptors.py",{}),
("python","UnitTestCatalog.py",{}),
("python","TemplateAlign.py",{}),
("python","Recap.py",{}),
("python","BRICS.py",{}),
("python","UnitTestDescriptors.py",{}),
("python","AllChem.py",{}),
("python","PropertyMol.py",{}),
("python","UnitTestInchi.py",{}),
("python","SaltRemover.py",{}),
("python","UnitTestFunctionalGroups.py",{}),
("python","UnitTestCrippen.py",{}),
("python","__init__.py",{}),
("python","PandasTools.py",{}),
("python","test_list.py",{'dir':'AtomPairs'}),
("python","test_list.py",{'dir':'ChemUtils'}),
("python","test_list.py",{'dir':'EState'}),
("python","test_list.py",{'dir':'FeatMaps'}),
("python","test_list.py",{'dir':'Fingerprints'}),
("python","test_list.py",{'dir':'Pharm2D'}),
("python","test_list.py",{'dir':'Pharm3D'}),
#("python","test_list.py",{'dir':'Subshape'}),
("python","test_list.py",{'dir':'Suppliers'}),
("python","test_list.py",{'dir':'Scaffolds'}),
("python","test_list.py",{'dir':'Draw'}),
("python","test_list.py",{'dir':'Fraggle'}),
("python","test_list.py",{'dir':'SimpleEnum'}),
]
longTests=[
("python","UnitTestArom.py",{}),
("python","UnitTestGraphDescriptors.2.py -l",{}),
("python","UnitTestSurf.py -l",{}),
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
| bsd-3-clause | -6,715,822,941,456,160,000 | 32.967742 | 57 | 0.583571 | false |
bjamesv/pyweatherviz | daily_json_to_dict.py | 1 | 4201 | import api_info
from dateutil.parser import parse
import requests
import json
import pandas as pd
import logging
map_ghcn_by_date_tuple = {}
#dictionary, caching fully downloaded/parsed GHCN in memory
def get_ncei_daily_climate_dicts( date_start, date_xend):
"""
obtain daily Global Historical Climatology Network data, via disk cache
or NCEI web API registered developer token.
"""
# get climate dict from this module's in-memory cache
requested_period = (date_start,date_xend)
try:
ghcn_rows = map_ghcn_by_date_tuple[ requested_period ]
logging.info('Using inmemory NCEI data: {}'.format(requested_period))
except KeyError:
# fall back to disk cache, or NCEI RESTful api
list_raw_dicts = _get_list_ncei_daily_climate( date_start, date_xend)
# build dicts, & return the collection.
ghcn_rows = _get_daily_climate_dicts( list_raw_dicts)
# add to module's in-memory cache
map_ghcn_by_date_tuple[ requested_period] = ghcn_rows
return ghcn_rows
def _get_list_ncei_daily_climate( date_start, date_xend):
"""
returns collection of dicts, representing raw daily Global Historical
Climatology Network data.
"""
token = {'Token': api_info.key }
url = "https://www.ncdc.noaa.gov/cdo-web/api/v2/data?\
datasetid=GHCND&stationid=GHCND:USC00205567\
&startdate={start}&enddate={xend}\
&limit=1000"
dict_range={
'start': "{:%Y-%m-%d}".format( date_start)
,'xend' : "{:%Y-%m-%d}".format( date_xend)
}
file_cache = 'daily_json_{start}_{xend}.json'.format( **dict_range)
try:
cache = open( file_cache)
logging.info('Opening local NCEI cache: ({})'.format(file_cache))
list_json_response = json.load( cache)
except FileNotFoundError:
url_req = url.format( **dict_range)
msg = 'Local NCEI cache ({}) not found, downloading: ({})'
logging.info(msg.format(file_cache,url_req))
# default requests behavior for connect timeout (infinte wait?) was no
# good on a poorly configured IPv6 network (many, dead routes)
max_s = (5,45) #docs.python-requests.org/en/latest/user/advanced/#timeouts
list_json_response = requests.get( url_req, headers=token, timeout=max_s).json().get('results')
json.dump( list_json_response, open( file_cache, 'w'))
return list_json_response
def _get_daily_climate_dicts( list_daily_climate):
"""
returns collection of dicts, each representing one day of daily Global
    Historical Climatology Network data.
>>> l = [{'date':'2013-01-01T00:00:00','datatype':'TMAX','value':25}\
,{'date':'2013-01-01T00:00:00','datatype':'SNWD','value':175}\
,{'date':'2013-01-01T00:00:00','datatype':'PRCP','value':90}]
>>> out = _get_daily_climate_dicts( l)
>>> from pprint import pprint
>>> pprint( out)
[{'DATE': datetime.datetime(2013, 1, 1, 0, 0),
'PRCP_MM': 9.0,
'SNWD_MM': 175,
'TMAX_C': 2.5}]
"""
list_one_row_per_day = []
df_by_date = pd.DataFrame(list_daily_climate).groupby('date')
for str_group in df_by_date.groups.keys():
# build dict - add date
dict_day = {'DATE': parse(str_group)}
# extract TMAX
df_day = df_by_date.get_group( str_group)
if 'TMAX' in df_day.datatype.values:
tmax_tenth_degC = df_day[ df_day.datatype == 'TMAX'].value
dict_day['TMAX_C'] = int(tmax_tenth_degC) / 10
# extract TMIN
if 'TMIN' in df_day.datatype.values:
tmin_tenth_degC = df_day[ df_day.datatype == 'TMIN'].value
dict_day['TMIN_C'] = int(tmin_tenth_degC) / 10
# extract snow depth in mm
dict_day['SNWD_MM'] = 0
if 'SNWD' in df_day.datatype.values:
dict_day['SNWD_MM'] = int(df_day[ df_day.datatype == 'SNWD'].value)
# extract precipitation in mm
dict_day['PRCP_MM'] = 0
if 'PRCP' in df_day.datatype.values:
tenth_mm = int(df_day[ df_day.datatype == 'PRCP'].value)
dict_day['PRCP_MM'] = tenth_mm / 10
# add dict to list
list_one_row_per_day.append( dict_day)
return list_one_row_per_day
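
# --- Hedged usage sketch (illustration only) ---
# Assumes api_info.key holds a valid NCEI token (see the import at the top),
# or that a cached daily_json_<start>_<xend>.json file already exists next to
# this script. The station id is fixed in the URL above, so only the date
# range is chosen here; the dates are arbitrary example values.
if __name__ == "__main__":
    from datetime import date
    rows = get_ncei_daily_climate_dicts(date(2013, 1, 1), date(2013, 2, 1))
    for row in rows[:3]:
        print(row['DATE'], row.get('TMAX_C'), row['SNWD_MM'], row['PRCP_MM'])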
| gpl-3.0 | 8,346,215,010,290,300,000 | 40.594059 | 101 | 0.620567 | false |
ContinuumIO/dask | dask/dataframe/optimize.py | 2 | 4215 | """ Dataframe optimizations """
import operator
from dask.base import tokenize
from ..optimization import cull, fuse
from .. import config, core
from ..highlevelgraph import HighLevelGraph
from ..utils import ensure_dict
from ..blockwise import optimize_blockwise, fuse_roots, Blockwise
def optimize(dsk, keys, **kwargs):
if isinstance(dsk, HighLevelGraph):
# Think about an API for this.
flat_keys = list(core.flatten(keys))
dsk = optimize_read_parquet_getitem(dsk, keys=flat_keys)
dsk = optimize_blockwise(dsk, keys=flat_keys)
dsk = fuse_roots(dsk, keys=flat_keys)
dsk = ensure_dict(dsk)
if isinstance(keys, list):
dsk, dependencies = cull(dsk, list(core.flatten(keys)))
else:
dsk, dependencies = cull(dsk, [keys])
fuse_subgraphs = config.get("optimization.fuse.subgraphs")
if fuse_subgraphs is None:
fuse_subgraphs = True
dsk, dependencies = fuse(
dsk, keys, dependencies=dependencies, fuse_subgraphs=fuse_subgraphs,
)
dsk, _ = cull(dsk, keys)
return dsk
def optimize_read_parquet_getitem(dsk, keys):
# find the keys to optimize
from .io.parquet.core import ParquetSubgraph
read_parquets = [k for k, v in dsk.layers.items() if isinstance(v, ParquetSubgraph)]
layers = dsk.layers.copy()
dependencies = dsk.dependencies.copy()
for k in read_parquets:
columns = set()
update_blocks = {}
for dep in dsk.dependents[k]:
block = dsk.layers[dep]
# Check if we're a read_parquet followed by a getitem
if not isinstance(block, Blockwise):
# getitem are Blockwise...
return dsk
if len(block.dsk) != 1:
# ... with a single item...
return dsk
if list(block.dsk.values())[0][0] != operator.getitem:
# ... where this value is __getitem__...
return dsk
if any(block.output == x[0] for x in keys if isinstance(x, tuple)):
# if any(block.output == x[0] for x in keys if isinstance(x, tuple)):
# ... but bail on the optimization if the getitem is what's requested
# These keys are structured like [('getitem-<token>', 0), ...]
# so we check for the first item of the tuple.
# See https://github.com/dask/dask/issues/5893
return dsk
block_columns = block.indices[1][0]
if isinstance(block_columns, str):
block_columns = [block_columns]
columns |= set(block_columns)
update_blocks[dep] = block
old = layers[k]
if columns and columns < set(old.meta.columns):
columns = list(columns)
meta = old.meta[columns]
name = "read-parquet-" + tokenize(old.name, columns)
assert len(update_blocks)
for block_key, block in update_blocks.items():
# (('read-parquet-old', (.,)), ( ... )) ->
# (('read-parquet-new', (.,)), ( ... ))
new_indices = ((name, block.indices[0][1]), block.indices[1])
numblocks = {name: block.numblocks[old.name]}
new_block = Blockwise(
block.output,
block.output_indices,
block.dsk,
new_indices,
numblocks,
block.concatenate,
block.new_axes,
)
layers[block_key] = new_block
dependencies[block_key] = {name}
dependencies[name] = dependencies.pop(k)
else:
# Things like df[df.A == 'a'], where the argument to
# getitem is not a column name
name = old.name
meta = old.meta
columns = list(meta.columns)
new = ParquetSubgraph(
name, old.engine, old.fs, meta, columns, old.index, old.parts, old.kwargs
)
layers[name] = new
if name != old.name:
del layers[old.name]
new_hlg = HighLevelGraph(layers, dependencies)
return new_hlg
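
# --- Hedged usage sketch (illustration only; not part of dask) ---
# The getitem pushdown above runs automatically inside optimize() whenever a
# read_parquet layer is followed by a column selection. The file name and the
# column "A" below are assumptions made up for the example; any parquet
# dataset written by dask.dataframe with more columns than "A" would do.
if __name__ == "__main__":
    import dask.dataframe as dd
    ddf = dd.read_parquet("example.parquet")
    # Selecting one column lets optimize_read_parquet_getitem prune the
    # columns that the ParquetSubgraph reads from disk.
    print(ddf["A"].sum().compute())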
| bsd-3-clause | 241,666,005,083,893,500 | 33.268293 | 88 | 0.551601 | false |
HonzaKral/warehouse | warehouse/packaging/models.py | 1 | 12439 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from citext import CIText
from pyramid.security import Allow
from pyramid.threadlocal import get_current_request
from sqlalchemy import (
CheckConstraint, Column, Enum, ForeignKey, ForeignKeyConstraint, Index,
Boolean, DateTime, Integer, Table, Text,
)
from sqlalchemy import func, orm, sql
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from warehouse import db
from warehouse.accounts.models import User
from warehouse.classifiers.models import Classifier
from warehouse.sitemap.models import SitemapMixin
from warehouse.utils.attrs import make_repr
class Role(db.Model):
__tablename__ = "roles"
__table_args__ = (
Index("roles_pack_name_idx", "package_name"),
Index("roles_user_name_idx", "user_name"),
)
__repr__ = make_repr("role_name", "user_name", "package_name")
role_name = Column(Text)
user_name = Column(
CIText,
ForeignKey("accounts_user.username", onupdate="CASCADE"),
)
package_name = Column(
Text,
ForeignKey("packages.name", onupdate="CASCADE"),
)
user = orm.relationship(User, lazy=False)
project = orm.relationship("Project", lazy=False)
class ProjectFactory:
def __init__(self, request):
self.request = request
def __getitem__(self, project):
try:
return self.request.db.query(Project).filter(
Project.normalized_name == func.normalize_pep426_name(project)
).one()
except NoResultFound:
raise KeyError from None
class Project(SitemapMixin, db.ModelBase):
__tablename__ = "packages"
__table_args__ = (
CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="packages_valid_name",
),
)
__repr__ = make_repr("name")
name = Column(Text, primary_key=True, nullable=False)
normalized_name = orm.column_property(func.normalize_pep426_name(name))
stable_version = Column(Text)
autohide = Column(Boolean, server_default=sql.true())
comments = Column(Boolean, server_default=sql.true())
bugtrack_url = Column(Text)
hosting_mode = Column(Text, nullable=False, server_default="pypi-only")
created = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
has_docs = Column(Boolean)
upload_limit = Column(Integer, nullable=True)
releases = orm.relationship(
"Release",
backref="project",
cascade="all, delete-orphan",
lazy="dynamic",
)
def __getitem__(self, version):
try:
return self.releases.filter(Release.version == version).one()
except NoResultFound:
raise KeyError from None
def __acl__(self):
session = orm.object_session(self)
acls = []
# Get all of the users for this project.
query = session.query(Role).filter(Role.project == self)
query = query.options(orm.lazyload("project"))
query = query.options(orm.joinedload("user").lazyload("emails"))
for role in sorted(
query.all(),
key=lambda x: ["Owner", "Maintainer"].index(x.role_name)):
acls.append((Allow, role.user.id, ["upload"]))
return acls
@property
def documentation_url(self):
        # TODO: Move this into the database and eliminate the use of the
# threadlocal here.
request = get_current_request()
# If the project doesn't have docs, then we'll just return a None here.
if not self.has_docs:
return
return request.route_url("legacy.docs", project=self.name)
class DependencyKind(enum.IntEnum):
requires = 1
provides = 2
obsoletes = 3
requires_dist = 4
provides_dist = 5
obsoletes_dist = 6
requires_external = 7
# TODO: Move project URLs into their own table, since they are not actually
# a "dependency".
project_url = 8
class Dependency(db.Model):
__tablename__ = "release_dependencies"
__table_args__ = (
Index("rel_dep_name_idx", "name"),
Index("rel_dep_name_version_idx", "name", "version"),
Index("rel_dep_name_version_kind_idx", "name", "version", "kind"),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
__repr__ = make_repr("name", "version", "kind", "specifier")
name = Column(Text)
version = Column(Text)
kind = Column(Integer)
specifier = Column(Text)
def _dependency_relation(kind):
return orm.relationship(
"Dependency",
primaryjoin=lambda: sql.and_(
Release.name == Dependency.name,
Release.version == Dependency.version,
Dependency.kind == kind.value,
),
viewonly=True,
)
class Release(db.ModelBase):
__tablename__ = "releases"
@declared_attr
def __table_args__(cls): # noqa
return (
Index("release_created_idx", cls.created.desc()),
Index("release_name_created_idx", cls.name, cls.created.desc()),
Index("release_name_idx", cls.name),
Index("release_pypi_hidden_idx", cls._pypi_hidden),
Index("release_version_idx", cls.version),
)
__repr__ = make_repr("name", "version")
name = Column(
Text,
ForeignKey("packages.name", onupdate="CASCADE"),
primary_key=True,
)
version = Column(Text, primary_key=True)
author = Column(Text)
author_email = Column(Text)
maintainer = Column(Text)
maintainer_email = Column(Text)
home_page = Column(Text)
license = Column(Text)
summary = Column(Text)
description = Column(Text)
keywords = Column(Text)
platform = Column(Text)
download_url = Column(Text)
_pypi_ordering = Column(Integer)
_pypi_hidden = Column(Boolean)
cheesecake_installability_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
cheesecake_documentation_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
cheesecake_code_kwalitee_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
requires_python = Column(Text)
description_from_readme = Column(Boolean)
created = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
_classifiers = orm.relationship(
Classifier,
backref="project_releases",
secondary=lambda: release_classifiers,
order_by=Classifier.classifier,
)
classifiers = association_proxy("_classifiers", "classifier")
files = orm.relationship(
"File",
backref="release",
cascade="all, delete-orphan",
lazy="dynamic",
order_by=lambda: File.filename,
)
dependencies = orm.relationship("Dependency")
_requires = _dependency_relation(DependencyKind.requires)
requires = association_proxy("_requires", "specifier")
_provides = _dependency_relation(DependencyKind.provides)
provides = association_proxy("_provides", "specifier")
_obsoletes = _dependency_relation(DependencyKind.obsoletes)
obsoletes = association_proxy("_obsoletes", "specifier")
_requires_dist = _dependency_relation(DependencyKind.requires_dist)
requires_dist = association_proxy("_requires_dist", "specifier")
_provides_dist = _dependency_relation(DependencyKind.provides_dist)
provides_dist = association_proxy("_provides_dist", "specifier")
_obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")
_requires_external = _dependency_relation(DependencyKind.requires_external)
requires_external = association_proxy("_requires_external", "specifier")
_project_urls = _dependency_relation(DependencyKind.project_url)
project_urls = association_proxy("_project_urls", "specifier")
class File(db.Model):
__tablename__ = "release_files"
__table_args__ = (
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
Index("release_files_name_idx", "name"),
Index("release_files_name_version_idx", "name", "version"),
Index("release_files_packagetype_idx", "packagetype"),
Index("release_files_version_idx", "version"),
)
name = Column(Text)
version = Column(Text)
python_version = Column(Text)
packagetype = Column(
Enum(
"bdist_dmg", "bdist_dumb", "bdist_egg", "bdist_msi", "bdist_rpm",
"bdist_wheel", "bdist_wininst", "sdist",
),
)
comment_text = Column(Text)
filename = Column(Text, unique=True)
size = Column(Integer)
has_signature = Column(Boolean)
md5_digest = Column(Text, unique=True)
downloads = Column(Integer, server_default=sql.text("0"))
upload_time = Column(DateTime(timezone=False), server_default=func.now())
@hybrid_property
def path(self):
return "/".join([
self.python_version,
self.release.project.name[0],
self.release.project.name,
self.filename,
])
@path.expression
def path(self):
return func.concat_ws(
sql.text("'/'"),
self.python_version,
func.substring(self.name, sql.text("1"), sql.text("1")),
self.name,
self.filename,
)
@hybrid_property
def pgp_path(self):
return self.path + ".asc"
@pgp_path.expression
def pgp_path(self):
return func.concat(self.path, ".asc")
class Filename(db.ModelBase):
__tablename__ = "file_registry"
id = Column(Integer, primary_key=True, nullable=False)
filename = Column(Text, unique=True, nullable=False)
release_classifiers = Table(
"release_classifiers",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("trove_id", Integer(), ForeignKey("trove_classifiers.id")),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
Index("rel_class_name_idx", "name"),
Index("rel_class_name_version_idx", "name", "version"),
Index("rel_class_trove_id_idx", "trove_id"),
Index("rel_class_version_id_idx", "version"),
)
class JournalEntry(db.ModelBase):
__tablename__ = "journals"
@declared_attr
def __table_args__(cls): # noqa
return (
Index(
"journals_changelog",
"submitted_date", "name", "version", "action",
),
Index("journals_id_idx", "id"),
Index("journals_name_idx", "name"),
Index("journals_version_idx", "version"),
Index(
"journals_latest_releases",
"submitted_date", "name", "version",
postgresql_where=(
(cls.version != None) & (cls.action == "new release") # noqa
),
),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text)
version = Column(Text)
action = Column(Text)
submitted_date = Column(DateTime(timezone=False))
_submitted_by = Column(
"submitted_by",
CIText,
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
),
)
submitted_by = orm.relationship(User)
submitted_from = Column(Text)
| apache-2.0 | 3,921,414,559,749,365,000 | 29.045894 | 81 | 0.614438 | false |
Wintermute0110/plugin.program.advanced.MAME.launcher | dev-graphics/test_generate_fanart.py | 1 | 5637 | #!/usr/bin/python
#
#
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#
# Scales and centers img into a box of size (box_x_size, box_y_size).
# Scaling keeps original img aspect ratio.
# Returns an image of size (box_x_size, box_y_size)
#
def PIL_resize_proportional(img, layout, dic_key, CANVAS_COLOR = (0, 0, 0)):
box_x_size = layout[dic_key]['width']
box_y_size = layout[dic_key]['height']
# log_debug('PIL_resize_proportional() Initialising ...')
# log_debug('img X_size = {} | Y_size = {}'.format(img.size[0], img.size[1]))
# log_debug('box X_size = {} | Y_size = {}'.format(box_x_size, box_y_size))
# --- First try to fit X dimension ---
# log_debug('PIL_resize_proportional() Fitting X dimension')
wpercent = (box_x_size / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
r_x_size = box_x_size
r_y_size = hsize
x_offset = 0
y_offset = (box_y_size - r_y_size) / 2
# log_debug('resize X_size = {} | Y_size = {}'.format(r_x_size, r_y_size))
# log_debug('resize x_offset = {} | y_offset = {}'.format(x_offset, y_offset))
# --- Second try to fit Y dimension ---
if y_offset < 0:
# log_debug('Fitting Y dimension')
hpercent = (box_y_size / float(img.size[1]))
wsize = int((float(img.size[0]) * float(hpercent)))
r_x_size = wsize
r_y_size = box_y_size
x_offset = (box_x_size - r_x_size) / 2
y_offset = 0
# log_debug('resize X_size = {} | Y_size = {}'.format(r_x_size, r_y_size))
# log_debug('resize x_offset = {} | y_offset = {}'.format(x_offset, y_offset))
# >> Create a new image and paste original image centered.
canvas_img = Image.new('RGB', (box_x_size, box_y_size), CANVAS_COLOR)
# >> Resize and paste
img = img.resize((r_x_size, r_y_size), Image.ANTIALIAS)
canvas_img.paste(img, (x_offset, y_offset, x_offset + r_x_size, y_offset + r_y_size))
return canvas_img
def PIL_paste_image(img, img_title, layout, dic_key):
box = (
layout[dic_key]['x_pos'],
layout[dic_key]['y_pos'],
layout[dic_key]['x_pos'] + layout[dic_key]['x_size'],
layout[dic_key]['y_pos'] + layout[dic_key]['y_size']
)
img.paste(img_title, box)
return img
# --- Fanart layout ---
layout = {
'title' : {'x_size' : 450, 'y_size' : 450, 'x_pos' : 50, 'y_pos' : 50},
'snap' : {'x_size' : 450, 'y_size' : 450, 'x_pos' : 50, 'y_pos' : 550},
'flyer' : {'x_size' : 450, 'y_size' : 450, 'x_pos' : 1420, 'y_pos' : 50},
'cabinet' : {'x_size' : 300, 'y_size' : 425, 'x_pos' : 1050, 'y_pos' : 625},
'artpreview' : {'x_size' : 450, 'y_size' : 550, 'x_pos' : 550, 'y_pos' : 500},
'PCB' : {'x_size' : 300, 'y_size' : 300, 'x_pos' : 1500, 'y_pos' : 525},
'clearlogo' : {'x_size' : 450, 'y_size' : 200, 'x_pos' : 1400, 'y_pos' : 850},
'cpanel' : {'x_size' : 300, 'y_size' : 100, 'x_pos' : 1050, 'y_pos' : 500},
'marquee' : {'x_size' : 800, 'y_size' : 275, 'x_pos' : 550, 'y_pos' : 200},
'text' : { 'x_pos' : 550, 'y_pos' : 50, 'size' : 72},
}
# --- Create fanart canvas ---
img = Image.new('RGB', (1920, 1080), (0, 0, 0))
draw = ImageDraw.Draw(img)
font_mono = ImageFont.truetype('../fonts/Inconsolata.otf', layout['text']['size'])
# --- Title and Snap (colour rectangle for placement) ---
# img_title = Image.new('RGB', (TITLE_X_SIZE, TITLE_Y_SIZE), (25, 25, 25))
# img_snap = Image.new('RGB', (SNAP_X_SIZE, SNAP_Y_SIZE), (0, 200, 0))
# print('Title X_size = {} | img Y_size = {}'.format(img_title.size[0], img_title.size[1]))
# print(img_title.format, img_title.size, img_title.mode)
# --- Title and Snap (open PNG actual screenshot) ---
img_title = Image.open('dino_title.png')
img_snap = Image.open('dino_snap.png')
img_artpreview = Image.open('dino_artpreview.png')
img_cabinet = Image.open('dino_cabinet.png')
img_clearlogo = Image.open('dino_clearlogo.png')
img_cpanel = Image.open('dino_cpanel.png')
img_flyer = Image.open('dino_flyer.png')
img_marquee = Image.open('dino_marquee.png')
img_PCB = Image.open('dino_PCB.png')
# --- Resize keeping aspect ratio ---
img_title = PIL_resize_proportional(img_title, layout, 'title')
img_snap = PIL_resize_proportional(img_snap, layout, 'snap')
img_artpreview = PIL_resize_proportional(img_artpreview, layout, 'artpreview')
img_cabinet = PIL_resize_proportional(img_cabinet, layout, 'cabinet')
img_clearlogo = PIL_resize_proportional(img_clearlogo, layout, 'clearlogo')
img_cpanel = PIL_resize_proportional(img_cpanel, layout, 'cpanel')
img_flyer = PIL_resize_proportional(img_flyer, layout, 'flyer')
img_marquee = PIL_resize_proportional(img_marquee, layout, 'marquee')
img_PCB = PIL_resize_proportional(img_PCB, layout, 'PCB')
# --- Composite fanart ---
# NOTE The box dimensions must have the same size as the pasted image.
img = PIL_paste_image(img, img_title, layout, 'title')
img = PIL_paste_image(img, img_snap, layout, 'snap')
img = PIL_paste_image(img, img_artpreview, layout, 'artpreview')
img = PIL_paste_image(img, img_cabinet, layout, 'cabinet')
img = PIL_paste_image(img, img_clearlogo, layout, 'clearlogo')
img = PIL_paste_image(img, img_cpanel, layout, 'cpanel')
img = PIL_paste_image(img, img_flyer, layout, 'flyer')
img = PIL_paste_image(img, img_marquee, layout, 'marquee')
img = PIL_paste_image(img, img_PCB, layout, 'PCB')
# --- Print machine name ---
draw.text((layout['text']['x_pos'], layout['text']['y_pos']),
'dino', (255, 255, 255), font = font_mono)
# --- Save test fanart ---
img.save('fanart.png')
| gpl-2.0 | -9,040,719,822,525,605,000 | 43.738095 | 95 | 0.604932 | false |
abreen/socrates.py | logisim/subcircuit.py | 1 | 8875 | from logisim.util import num_rotations
from logisim.errors import NoValueGivenError
from logisim.debug import narrate, suppress_narration
from logisim.location import Location
from logisim.component import Component
from logisim.pins import InputPin, OutputPin
class Subcircuit(Component):
def __init__(self, circuit, defaults=None):
# Logisim global defaults
self.facing = 'east'
Component.__init__(self, defaults)
# reference to Circuit object
self.circuit = circuit
self.label = circuit.name
# TODO custom subcircuit appearance
self.appearance = None
def get_output_locations(self):
"""Use the underlying Circuit object's appearance data
(or the default logic) to produce a list of output pin locations.
"""
if not self.appearance:
locs = _default_subcircuit_locations(self)
return [loc for loc, pin in locs.items() if type(pin) is OutputPin]
else:
raise NotImplementedError
def get_input_locations(self):
"""Use the underlying Circuit object's appearance data
(or the default logic) to produce a list of input pin locations.
"""
if not self.appearance:
locs = _default_subcircuit_locations(self)
return [loc for loc, pin in locs.items() if type(pin) is InputPin]
else:
raise NotImplementedError
def get_pin_at(self, loc):
"""Given the location of a pin on this subcircuit, return
the pin at that location. This method produces the location of the
pin on this subcircuit's representation, not the location of the pin
on the underlying circuit's coordinate plane.
"""
if not self.appearance:
locs = _default_subcircuit_locations(self)
else:
raise NotImplementedError
for pin_loc, pin in locs.items():
if pin_loc == loc:
return pin
return None
def eval(self, at_loc):
if not self.appearance:
pins = _default_subcircuit_locations(self)
input_vals = {}
for in_pin_loc, tup in self.input_from.items():
component, out_pin_loc = tup
in_pin = pins[in_pin_loc]
try:
input_vals[in_pin] = component.eval(at_loc=out_pin_loc)
except NoValueGivenError:
# this subcircuit might still work, if this input pin is
# never used in the underlying circuit, so we don't
# do anything now
continue
output_vals = self.circuit.eval(input_vals)
return output_vals[pins[at_loc]]
else:
raise NotImplementedError
def _default_subcircuit_locations(subcircuit):
circuit = subcircuit.circuit
# for a subcircuit's default appearance, Logisim places each pin on
# an edge of the subcircuit rectangle by which direction they face in
# the actual circuit
pins_facing = {'north': [], 'east': [], 'south': [], 'west': []}
for pin in circuit.input_pins:
pins_facing[pin.facing].append(pin)
for pin in circuit.output_pins:
pins_facing[pin.facing].append(pin)
# sort the pins the way Logisim would sort them (for each facing
# direction, left to right or top to bottom)
for facing in pins_facing:
if facing in ['east', 'west']:
pins_facing[facing].sort(key=lambda pin: pin.loc.y)
else:
pins_facing[facing].sort(key=lambda pin: pin.loc.x)
# we construct a 2D list representing the subcircuit's appearance
top = pins_facing['south']
bottom = pins_facing['north']
left = pins_facing['east']
right = pins_facing['west']
# n rows, m columns
n = max(len(left), len(right))
m = max(len(top), len(bottom))
corner_spacing = (top or bottom) and (left or right)
if corner_spacing:
m += 2
n += 2
top = [None] + top + [None] if top else top
bottom = [None] + bottom + [None] if bottom else bottom
left = [None] + left + [None] if left else left
right = [None] + right + [None] if right else right
n = max(n, 4)
m = max(m, 4)
pin_layout = _make2d(n, m)
if top:
_overwrite_row(pin_layout, 0, top)
if bottom:
_overwrite_row(pin_layout, n - 1, bottom)
if left:
_overwrite_col(pin_layout, 0, left)
if right:
_overwrite_col(pin_layout, m - 1, right)
# we have the subcircuit's location, which is the location of what
# Logisim calls its "anchor"; by default, the anchor is placed over
# the first pin facing west (then south, east, and north, if there
# is no such pin)
# we will find the position of the anchor pin (the position being its
# row and column index into the 'pin_layout' 2-D list)
if len(pins_facing['west']) > 0:
# pins on the right
anchor_pos = (1 if corner_spacing else 0, m - 1)
elif len(pins_facing['south']) > 0:
# pins on the top
anchor_pos = (0, 1 if corner_spacing else 0)
elif len(pins_facing['east']) > 0:
# pins on the left
anchor_pos = (1 if corner_spacing else 0, 0)
elif len(pins_facing['north']) > 0:
# pins on the bottom
anchor_pos = (n - 1, 1 if corner_spacing else 0)
else:
# TODO subcircuit has no pins?
pass
# if this subcircuit is not facing east (the default), rotate the
# 2-D list and change the anchor position accordingly
rotations = num_rotations('east', subcircuit.facing)
if rotations != 0:
pin_layout, anchor_pos = _rotate(pin_layout, anchor_pos, rotations)
# redefine: n rows, m columns, if this rotate changed them
n, m = len(pin_layout), len(pin_layout[0])
x, y = subcircuit.loc.x, subcircuit.loc.y
# finds location of each pin given the subcircuit's anchor
# position by finding each position's difference in position
# in the list, and using that to find its absolute position
def pin_location(val, row, col):
y_offset = row - anchor_pos[0]
x_offset = col - anchor_pos[1]
return Location(x + (x_offset * 10), y + (y_offset * 10))
pin_locs = _map2d(pin_location, pin_layout)
return {pin_locs[r][c]: pin_layout[r][c]
for r in range(n) for c in range(m)
if type(pin_layout[r][c]) is not None}
def _map2d(f, list2d):
new_list2d = []
for r in range(len(list2d)):
new_row = []
for c in range(len(list2d[r])):
new_row.append(f(list2d[r][c], r, c))
new_list2d.append(new_row)
return new_list2d
def _make2d(rows, cols):
return [[None for _ in range(cols)] for _ in range(rows)]
def _overwrite_row(list_, index, row):
"""Given a reference to a 2-D list and a row index, replace the
row with the values in the new row. If the new row has fewer columns
than the existing one, the new row is centered and Nones are added
as padding.
"""
cols = len(list_[index])
if cols < len(row):
raise ValueError("row is too big ({}, expected {})".format(len(row),
cols))
elif cols == len(row):
new_row = row
else:
left = [None] * ((cols - len(row)) // 2)
right = [None] * (cols - len(row) - len(left))
new_row = left + row + right
for c in range(cols):
list_[index][c] = new_row[c]
def _overwrite_col(list_, index, col):
"""See overwrite_row(). This function does the same thing, but
column-wise.
"""
rows = len(list_)
if rows < len(col):
raise ValueError("column is too big ({}, expected {})".format(len(col),
rows))
elif rows == len(col):
new_col = col
else:
above = [None] * ((rows - len(col)) // 2)
below = [None] * (rows - len(col) - len(above))
new_col = above + col + below
for r in range(rows):
list_[r][index] = new_col[r]
def _rotate(pin_layout, anchor_pos, times):
for n in range(times):
anchor_pos = _rotate90_pos(anchor_pos, len(pin_layout))
pin_layout = _rotate90_2d(pin_layout)
return pin_layout, anchor_pos
def _rotate90_pos(anchor_pos, num_rows):
row_index, col_index = anchor_pos
return (col_index, num_rows - row_index - 1)
def _rotate90_2d(list_):
rows, cols = len(list_), len(list_[0])
rotated = [[None for _ in range(rows)] for _ in range(cols)]
for r in range(rows):
for c in range(cols):
new_r, new_c = _rotate90_pos((r, c), rows)
rotated[new_r][new_c] = list_[r][c]
return rotated
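
# --- Hedged sanity-check sketch (added for illustration) ---
# _rotate90_2d turns a 2-D pin layout clockwise by 90 degrees and
# _rotate90_pos maps a single (row, col) position the same way; the tiny
# grid below shows one turn.
if __name__ == "__main__":
    grid = [[1, 2, 3],
            [4, 5, 6]]
    print(_rotate90_2d(grid))                # [[4, 1], [5, 2], [6, 3]]
    print(_rotate90_pos((0, 0), len(grid)))  # (0, 1)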
| gpl-2.0 | 3,178,043,507,803,182,600 | 30.810036 | 79 | 0.586479 | false |
muchu1983/104_cameo | cameo/utility.py | 1 | 31383 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os
import re
import json
import time
import datetime
import dateparser
import pkg_resources
import shutil
import smtplib
import logging
import random
from email.mime.text import MIMEText
from scrapy import Selector
from geopy.geocoders import GoogleV3
from bennu.filesystemutility import FileSystemUtility
# Shared utility class
class Utility:
    # Constructor
def __init__(self):
self.fsUtil = FileSystemUtility()
self.strListOfCountryByContinentJsonFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="list_of_country_by_continent.json")
self.parseListOfCountryWikiPage()
#email helper setting
self.DEFAULT_SMTP = "smtp.gmail.com:587"
self.DEFAULT_ACCOUNT = "[email protected]"
self.DEFAULT_PASSWORD = "cameo70525198"
    # Send an email
def sendEmail(self, strSubject=None, strFrom=None, strTo=None, strMsg=None, lstStrTarget=None, strSmtp=None, strAccount=None, strPassword=None):
if not strSmtp:
strSmtp = self.DEFAULT_SMTP
if not strAccount:
strAccount = self.DEFAULT_ACCOUNT
if not strPassword:
strPassword = self.DEFAULT_PASSWORD
msg = MIMEText(strMsg)
msg["Subject"] = strSubject
msg["From"] = strFrom
msg["To"] = strTo
try:
server = smtplib.SMTP(strSmtp)
server.ehlo()
server.starttls()
server.login(strAccount, strPassword)
server.sendmail(strAccount, lstStrTarget, msg.as_string())
server.quit()
except Exception, e:
logging.error("[eMail Helper] Sending email failed! ErrorMessage: %s"%str(e))
    # Save data to a file (overwrite)
def overwriteSaveAs(self, strFilePath=None, unicodeData=None):
with open(strFilePath, "w+") as file:
file.write(unicodeData.encode("utf-8"))
    # Read the content of a json file and return it as a dict object
def readObjectFromJsonFile(self, strJsonFilePath=None):
dicRet = None
with open(strJsonFilePath, "r") as jsonFile:
dicRet = json.load(jsonFile, encoding="utf-8")
return dicRet
    # Write the content of a dict object into a json file
def writeObjectToJsonFile(self, dicData=None, strJsonFilePath=None):
with open(strJsonFilePath, "w+") as jsonFile:
jsonFile.write(json.dumps(dicData, ensure_ascii=False, indent=4, sort_keys=True).encode("utf-8"))
    # Get the paths of sub-folders
def getSubFolderPathList(self, strBasedir=None):
lstStrSubFolderPath = []
for base, dirs, files in os.walk(strBasedir):
if base == strBasedir:
for dir in dirs:
strFolderPath = base + "\\" + dir
lstStrSubFolderPath.append(strFolderPath)
return lstStrSubFolderPath
    # Get the paths of files in strBasedir whose names end with strSuffixes
def getFilePathListWithSuffixes(self, strBasedir=None, strSuffixes=None):
lstStrFilePathWithSuffixes = []
for base, dirs, files in os.walk(strBasedir):
if base == strBasedir:#just check base dir
for strFilename in files:
if strFilename.endswith(strSuffixes):#find target files
strFilePath = base + "\\" + strFilename
lstStrFilePathWithSuffixes.append(strFilePath)
return lstStrFilePathWithSuffixes
    # Recursively get the paths of files in strBasedir whose names end with strSuffixes
def recursiveGetFilePathListWithSuffixes(self, strBasedir=None, strSuffixes=None):
lstStrFilePathWithSuffixes = []
for base, dirs, files in os.walk(strBasedir):
for strFilename in files:
if strFilename.endswith(strSuffixes):#find target files
strFilePath = base + "\\" + strFilename
lstStrFilePathWithSuffixes.append(strFilePath)
return lstStrFilePathWithSuffixes
    # Convert an abbreviated number string into a plain number (ex: 26.3k -> 26300)
def translateNumTextToPureNum(self, strNumText=None):
strNumText = strNumText.lower()
fPureNum = 0.0
strFloatPartText = re.match("^([0-9\.]*)k?m?$", strNumText)
if strFloatPartText != None:
strFloatPartText = strFloatPartText.group(1)
if strNumText.endswith("k"):
fPureNum = float(strFloatPartText) * 1000
elif strNumText.endswith("m"):
fPureNum = float(strFloatPartText) * 1000000
else:
fPureNum = float(strFloatPartText) * 1
return int(fPureNum)
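    # Examples of the conversion above (values follow directly from the regex logic):
    #   translateNumTextToPureNum("26.3k") -> 26300
    #   translateNumTextToPureNum("1.2M")  -> 1200000
    #   translateNumTextToPureNum("87")    -> 87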
    # Convert a "time left" string into a plain number
def translateTimeleftTextToPureNum(self, strTimeleftText=None, strVer=None):
dicVer = {"INDIEGOGO": self.translateTimeleftTextToPureNum_INDIEGOGO,
"WEBACKERS": self.translateTimeleftTextToPureNum_WEBACKERS}
return dicVer[strVer](strTimeleftText=strTimeleftText)
    # Convert a "time left" string into a plain number (ex: "100 days left" -> 100)
def translateTimeleftTextToPureNum_INDIEGOGO(self, strTimeleftText=None):
intDays = 0
if strTimeleftText == None:
return intDays
strTimeleftText = strTimeleftText.lower().strip()
if "hours left" in strTimeleftText:
strHoursText = re.match("^([0-9]*) hours left$", strTimeleftText)
if strHoursText != None:
strHoursText = strHoursText.group(1)
                intDays = (int(strHoursText)+24)/24 # less than 24 hours counts as one day
elif "days left" in strTimeleftText:
strDaysText = re.match("^([0-9]*) days left$", strTimeleftText)
if strDaysText != None:
strDaysText = strDaysText.group(1)
intDays = int(strDaysText)
else:
intDays = 0
return intDays
    # Convert "time left" text into a number of days (ex: "ends in 2 months 13 days" -> 73 days)
def translateTimeleftTextToPureNum_WEBACKERS(self, strTimeleftText=None):
intDays = 0
if strTimeleftText is not None:
if strTimeleftText in (u"已完成", u"已結束"):
return 0
strMonth = re.match(u"^([0-9]*)個月[0-9]*天後結束$", strTimeleftText)
strDay = re.match(u"^[0-9]*?個?月?([0-9]*)天後結束$", strTimeleftText)
if strMonth is not None:
strMonth = strMonth.group(1)
intDays = intDays + (int(strMonth)*30)
if strDay is not None:
strDay = strDay.group(1)
intDays = intDays + int(strDay)
return intDays
    # Get the creation date of a file
def getCtimeOfFile(self, strFilePath=None):
fCTimeStamp = os.path.getctime(strFilePath)
dtCTime = datetime.datetime.fromtimestamp(fCTimeStamp)
strCTime = dtCTime.strftime("%Y-%m-%d")
return strCTime
    # Use geopy to normalize raw location information
def geopyGeocode(self, strOriginLocation=""):
lstStrApiKey = [
u"AIzaSyB71s7yWXJajGDgfZXHGBXYnOww6eLx9vU",
u"AIzaSyDFYBYcwMkicRxE1hVUIHVNk5K2UFvV9Yk",
u"AIzaSyCCU72G1ID4zIfWN8I8zeoRtkLWFSG_jC8",
u"AIzaSyDc71hTtE2XTTiVnad-Jz3rXe338VcqWBY"
]
geolocator = GoogleV3(api_key=random.choice(lstStrApiKey))
        time.sleep(1) # avoid sending geopy queries too quickly
location = None
try:
location = geolocator.geocode(strOriginLocation, exactly_one=True)
except:
logging.error("[geopy error] find geocode faild. origin string: %s"%strOriginLocation)
(strAddress, fLatitude, fLongitude) = (None, 0, 0)
if location is not None:
strAddress = location.address
fLatitude = location.latitude
fLongitude = location.longitude
return (strAddress, fLatitude, fLongitude)
    # Parse list_of_country_by_continent_on_wikipedia.html
def parseListOfCountryWikiPage(self):
strLOCBCWikiPageFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="list_of_country_by_continent_on_wikipedia.html")
strParsedResultJsonFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="list_of_country_by_continent.json")
dicCountryNameCodeMapping = {}
strISO3166WikiPageFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="iso_3166_1_on_wikipedia.html")
with open(strISO3166WikiPageFilePath, "r") as pageISO3166File: #parse iso_3166_1_on_wikipedia.html
strPageSource = pageISO3166File.read()
root = Selector(text=strPageSource)
elesCountryTr = root.css("table.wikitable:nth-of-type(1) tbody tr")
for eleCountryTr in elesCountryTr:
strCountryNameText = eleCountryTr.css("td:nth-of-type(1) a::text").extract_first().lower()
strCountryCodeText = eleCountryTr.css("td:nth-of-type(2) a span::text").extract_first()
dicCountryNameCodeMapping[strCountryNameText] = strCountryCodeText
with open(strLOCBCWikiPageFilePath, "r") as pageLOCBCFile: #parse list_of_country_by_continent_on_wikipedia.html
strPageSource = pageLOCBCFile.read()
root = Selector(text=strPageSource)
elesContinentTable = root.css("table.wikitable")
dicParsedResult = {}
dicContinentName = {0:"AF", 1:"AS", 2:"EU", 3:"NA",
4:"SA", 5:"OC", 6:"AN"}
for intCurrentTableIndex, eleContinentTable in enumerate(elesContinentTable):
lstDicCountryData = []
lstStrCountryName = eleContinentTable.css("tr td:nth-of-type(2) i > a::text, tr td:nth-of-type(2) b > a::text").extract()
for strCountryName in lstStrCountryName:
dicCountryData = {}
#country name
dicCountryData["name"] = strCountryName.lower()
                #country iso-3166-1 code
dicCountryData["code"] = None
for strCountryNameKey in dicCountryNameCodeMapping:
if re.search(dicCountryData["name"], strCountryNameKey):
dicCountryData["code"] = dicCountryNameCodeMapping[strCountryNameKey]
if dicCountryData.get("code", None) is not None:
lstDicCountryData.append(dicCountryData)
dicParsedResult[dicContinentName[intCurrentTableIndex]] = lstDicCountryData
        # Custom data section
dicParsedResult["NA"].append({"name":"united states", "code":"US"})
dicParsedResult["NA"].append({"name":"usa", "code":"US"})
dicParsedResult["EU"].append({"name":"uk", "code":"GB"})
self.writeObjectToJsonFile(dicData=dicParsedResult, strJsonFilePath=strParsedResultJsonFilePath)
    # Get the country code (ISO-3166-1)
def getCountryCode(self, strCountryName=None):
dicListOfCountryByContinent = self.readObjectFromJsonFile(strJsonFilePath=self.strListOfCountryByContinentJsonFilePath)
strCountryCodeMatched = None
if strCountryName: # is not None
for strContinentName in dicListOfCountryByContinent:
lstDicCountryData = dicListOfCountryByContinent[strContinentName]
for dicCountryData in lstDicCountryData:
if unicode(strCountryName.lower().strip()) == dicCountryData["name"]:
strCountryCodeMatched = dicCountryData["code"]
return strCountryCodeMatched
    # Look up continent data from the wiki page data (list_of_country_by_continent.json)
def getContinentByCountryNameWikiVersion(self, strCountryName=None):
dicListOfCountryByContinent = self.readObjectFromJsonFile(strJsonFilePath=self.strListOfCountryByContinentJsonFilePath)
strContinentNameMatched = None
if strCountryName:# is not None
for strContinentName in dicListOfCountryByContinent:
lstDicCountryData = dicListOfCountryByContinent[strContinentName]
for dicCountryData in lstDicCountryData:
if unicode(strCountryName.lower().strip()) == dicCountryData["name"]:
strContinentNameMatched = strContinentName
return strContinentNameMatched
    # Convert a date string with the dateparser module
def parseStrDateByDateparser(self, strOriginDate=None, strBaseDate=datetime.datetime.now().strftime("%Y-%m-%d")):
strParsedDateBaseOnGivenBaseDate = None
dtBaseDate = datetime.datetime.strptime(strBaseDate, "%Y-%m-%d")
dToday = datetime.date.today()
dtToday = datetime.datetime.combine(dToday, datetime.datetime.min.time())
timedeltaNowToBase = dtToday - dtBaseDate
if strOriginDate: #is not None
dtParsedDateBaseOnNow = dateparser.parse(strOriginDate)
if dtParsedDateBaseOnNow:#is not None
strParsedDateBaseOnGivenBaseDate = (dtParsedDateBaseOnNow - timedeltaNowToBase).strftime("%Y-%m-%d")
return strParsedDateBaseOnGivenBaseDate
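    # Illustrative example (assumes dateparser resolves relative phrases against today):
    # a text such as "3 days ago" is parsed relative to now and then shifted onto the
    # given base date, e.g.
    #   parseStrDateByDateparser("3 days ago", strBaseDate="2016-01-10") -> "2016-01-07"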
    # Append a line of text to the end of a txt file if it is not already there
def appendLineToTxtIfNotExists(self, strTxtFilePath=None, strLine=None):
lstStrLineInTxt = []
strLine = strLine.strip() + u"\n"
if os.path.exists(strTxtFilePath):
with open(strTxtFilePath, "r") as txtFile:
lstStrLineInTxt = txtFile.readlines()
        if strLine not in lstStrLineInTxt:  # check for duplicates
with open(strTxtFilePath, "a") as txtFile:
#append line to .txt
txtFile.write(strLine)
    # Strip newline characters from each string in the list, then join and strip the result
def stripTextArray(self, lstStrText=None):
strTextLine = u""
for strText in lstStrText:
if strText is not None:
strText = re.sub("\s", " ", strText)
strTextLine = strTextLine + u" " + strText.strip()
return strTextLine.strip()
    # Test renaming of crunchbase organization html files
def crunchbaseOrganizationHtmlFileRename(self, strSourceFolder=None, strTargetFolder=None):
lstStrSourceHtmlFilePath = self.getFilePathListWithSuffixes(strBasedir=strSourceFolder, strSuffixes="crunchbase.html")
lstStrSourceHtmlFilePath = lstStrSourceHtmlFilePath + self.getFilePathListWithSuffixes(strBasedir=strSourceFolder, strSuffixes="crunchbase.htm")
for strSourceHtmlFilePath in lstStrSourceHtmlFilePath:
strCrunchbaseId = re.search("^.*\\\\(.*)crunchbase.html?$", strSourceHtmlFilePath).group(1)
strCrunchbaseId = re.sub("[^a-zA-Z0-9]+", "-", strCrunchbaseId.lower()).strip("-")
strTargetHtmlFilePath = strTargetFolder + u"\\%s_organization.html"%strCrunchbaseId
shutil.copy(strSourceHtmlFilePath, strTargetHtmlFilePath)
    # Look up continent data using a country mapping table
def getContinentByCountryName(self, strCountryName=None):
countries = [
{"code": "AD", "continent": "Europe", "name": "Andorra"},
{"code": "AF", "continent": "Asia", "name": "Afghanistan"},
{"code": "AG", "continent": "North America", "name": "Antigua and Barbuda"},
{"code": "AL", "continent": "Europe", "name": "Albania"},
{"code": "AM", "continent": "Asia", "name": "Armenia"},
{"code": "AO", "continent": "Africa", "name": "Angola"},
{"code": "AR", "continent": "South America", "name": "Argentina"},
{"code": "AT", "continent": "Europe", "name": "Austria"},
{"code": "AU", "continent": "Oceania", "name": "Australia"},
{"code": "AZ", "continent": "Asia", "name": "Azerbaijan"},
{"code": "BB", "continent": "North America", "name": "Barbados"},
{"code": "BD", "continent": "Asia", "name": "Bangladesh"},
{"code": "BE", "continent": "Europe", "name": "Belgium"},
{"code": "BF", "continent": "Africa", "name": "Burkina Faso"},
{"code": "BG", "continent": "Europe", "name": "Bulgaria"},
{"code": "BH", "continent": "Asia", "name": "Bahrain"},
{"code": "BI", "continent": "Africa", "name": "Burundi"},
{"code": "BJ", "continent": "Africa", "name": "Benin"},
{"code": "BN", "continent": "Asia", "name": "Brunei Darussalam"},
{"code": "BO", "continent": "South America", "name": "Bolivia"},
{"code": "BR", "continent": "South America", "name": "Brazil"},
{"code": "BS", "continent": "North America", "name": "Bahamas"},
{"code": "BT", "continent": "Asia", "name": "Bhutan"},
{"code": "BW", "continent": "Africa", "name": "Botswana"},
{"code": "BY", "continent": "Europe", "name": "Belarus"},
{"code": "BZ", "continent": "North America", "name": "Belize"},
{"code": "CA", "continent": "North America", "name": "Canada"},
{"code": "CD", "continent": "Africa", "name": "Democratic Republic of the Congo"},
{"code": "CG", "continent": "Africa", "name": "Republic of the Congo"},
{"code": "CI", "continent": "Africa", "name": u"Côte d'Ivoire"},
{"code": "CI", "continent": "Africa", "name": u"Cote d'Ivoire"},
{"code": "CL", "continent": "South America", "name": "Chile"},
{"code": "CM", "continent": "Africa", "name": "Cameroon"},
{"code": "CN", "continent": "Asia", "name": u"People's Republic of China"},
{"code": "CN", "continent": "Asia", "name": u"China"},
{"code": "CO", "continent": "South America", "name": "Colombia"},
{"code": "CR", "continent": "North America", "name": "Costa Rica"},
{"code": "CU", "continent": "North America", "name": "Cuba"},
{"code": "CV", "continent": "Africa", "name": "Cape Verde"},
{"code": "CY", "continent": "Asia", "name": "Cyprus"},
{"code": "CZ", "continent": "Europe", "name": "Czech Republic"},
{"code": "DE", "continent": "Europe", "name": "Germany"},
{"code": "DJ", "continent": "Africa", "name": "Djibouti"},
{"code": "DK", "continent": "Europe", "name": "Denmark"},
{"code": "DM", "continent": "North America", "name": "Dominica"},
{"code": "DO", "continent": "North America", "name": "Dominican Republic"},
{"code": "EC", "continent": "South America", "name": "Ecuador"},
{"code": "EE", "continent": "Europe", "name": "Estonia"},
{"code": "EG", "continent": "Africa", "name": "Egypt"},
{"code": "ER", "continent": "Africa", "name": "Eritrea"},
{"code": "ET", "continent": "Africa", "name": "Ethiopia"},
{"code": "FI", "continent": "Europe", "name": "Finland"},
{"code": "FJ", "continent": "Oceania", "name": "Fiji"},
{"code": "FR", "continent": "Europe", "name": "France"},
{"code": "GA", "continent": "Africa", "name": "Gabon"},
{"code": "GE", "continent": "Asia", "name": "Georgia"},
{"code": "GH", "continent": "Africa", "name": "Ghana"},
{"code": "GM", "continent": "Africa", "name": "The Gambia"},
{"code": "GN", "continent": "Africa", "name": "Guinea"},
{"code": "GR", "continent": "Europe", "name": "Greece"},
{"code": "GT", "continent": "North America", "name": "Guatemala"},
{"code": "GT", "continent": "North America", "name": "Haiti"},
{"code": "GW", "continent": "Africa", "name": "Guinea-Bissau"},
{"code": "GY", "continent": "South America", "name": "Guyana"},
{"code": "HN", "continent": "North America", "name": "Honduras"},
{"code": "HU", "continent": "Europe", "name": "Hungary"},
{"code": "ID", "continent": "Asia", "name": "Indonesia"},
{"code": "IE", "continent": "Europe", "name": "Republic of Ireland"},
{"code": "IL", "continent": "Asia", "name": "Israel"},
{"code": "IN", "continent": "Asia", "name": "India"},
{"code": "IQ", "continent": "Asia", "name": "Iraq"},
{"code": "IR", "continent": "Asia", "name": "Iran"},
{"code": "IS", "continent": "Europe", "name": "Iceland"},
{"code": "IT", "continent": "Europe", "name": "Italy"},
{"code": "JM", "continent": "North America", "name": "Jamaica"},
{"code": "JO", "continent": "Asia", "name": "Jordan"},
{"code": "JP", "continent": "Asia", "name": "Japan"},
{"code": "KE", "continent": "Africa", "name": "Kenya"},
{"code": "KG", "continent": "Asia", "name": "Kyrgyzstan"},
{"code": "KI", "continent": "Oceania", "name": "Kiribati"},
{"code": "KP", "continent": "Asia", "name": "North Korea"},
{"code": "KR", "continent": "Asia", "name": "South Korea"},
{"code": "KW", "continent": "Asia", "name": "Kuwait"},
{"code": "LB", "continent": "Asia", "name": "Lebanon"},
{"code": "LI", "continent": "Europe", "name": "Liechtenstein"},
{"code": "LR", "continent": "Africa", "name": "Liberia"},
{"code": "LS", "continent": "Africa", "name": "Lesotho"},
{"code": "LT", "continent": "Europe", "name": "Lithuania"},
{"code": "LU", "continent": "Europe", "name": "Luxembourg"},
{"code": "LV", "continent": "Europe", "name": "Latvia"},
{"code": "LY", "continent": "Africa", "name": "Libya"},
{"code": "MG", "continent": "Africa", "name": "Madagascar"},
{"code": "MH", "continent": "Oceania", "name": "Marshall Islands"},
{"code": "MK", "continent": "Europe", "name": "Macedonia"},
{"code": "ML", "continent": "Africa", "name": "Mali"},
{"code": "MM", "continent": "Asia", "name": "Myanmar"},
{"code": "MN", "continent": "Asia", "name": "Mongolia"},
{"code": "MR", "continent": "Africa", "name": "Mauritania"},
{"code": "MT", "continent": "Europe", "name": "Malta"},
{"code": "MU", "continent": "Africa", "name": "Mauritius"},
{"code": "MV", "continent": "Asia", "name": "Maldives"},
{"code": "MW", "continent": "Africa", "name": "Malawi"},
{"code": "MX", "continent": "North America", "name": "Mexico"},
{"code": "MY", "continent": "Asia", "name": "Malaysia"},
{"code": "MZ", "continent": "Africa", "name": "Mozambique"},
{"code": "NA", "continent": "Africa", "name": "Namibia"},
{"code": "NE", "continent": "Africa", "name": "Niger"},
{"code": "NG", "continent": "Africa", "name": "Nigeria"},
{"code": "NI", "continent": "North America", "name": "Nicaragua"},
{"code": "NL", "continent": "Europe", "name": "Kingdom of the Netherlands"},
{"code": "NL", "continent": "Europe", "name": "Netherlands"},
{"code": "NO", "continent": "Europe", "name": "Norway"},
{"code": "NP", "continent": "Asia", "name": "Nepal"},
{"code": "NR", "continent": "Oceania", "name": "Nauru"},
{"code": "NZ", "continent": "Oceania", "name": "New Zealand"},
{"code": "OM", "continent": "Asia", "name": "Oman"},
{"code": "PA", "continent": "North America", "name": "Panama"},
{"code": "PE", "continent": "South America", "name": "Peru"},
{"code": "PG", "continent": "Oceania", "name": "Papua New Guinea"},
{"code": "PH", "continent": "Asia", "name": "Philippines"},
{"code": "PK", "continent": "Asia", "name": "Pakistan"},
{"code": "PL", "continent": "Europe", "name": "Poland"},
{"code": "PT", "continent": "Europe", "name": "Portugal"},
{"code": "PW", "continent": "Oceania", "name": "Palau"},
{"code": "PY", "continent": "South America", "name": "Paraguay"},
{"code": "QA", "continent": "Asia", "name": "Qatar"},
{"code": "RO", "continent": "Europe", "name": "Romania"},
{"code": "RU", "continent": "Europe", "name": "Russia"},
{"code": "RU", "continent": "Europe", "name": "Russian Federation"},
{"code": "RW", "continent": "Africa", "name": "Rwanda"},
{"code": "SA", "continent": "Asia", "name": "Saudi Arabia"},
{"code": "SB", "continent": "Oceania", "name": "Solomon Islands"},
{"code": "SC", "continent": "Africa", "name": "Seychelles"},
{"code": "SD", "continent": "Africa", "name": "Sudan"},
{"code": "SE", "continent": "Europe", "name": "Sweden"},
{"code": "SG", "continent": "Asia", "name": "Singapore"},
{"code": "SI", "continent": "Europe", "name": "Slovenia"},
{"code": "SK", "continent": "Europe", "name": "Slovakia"},
{"code": "SL", "continent": "Africa", "name": "Sierra Leone"},
{"code": "SM", "continent": "Europe", "name": "San Marino"},
{"code": "SN", "continent": "Africa", "name": "Senegal"},
{"code": "SO", "continent": "Africa", "name": "Somalia"},
{"code": "SR", "continent": "South America", "name": "Suriname"},
{"code": "ST", "continent": "Africa", "name": u"República Democrática de São Tomé e Príncipe"},
{"code": "SY", "continent": "Asia", "name": "Syria"},
{"code": "TG", "continent": "Africa", "name": "Togo"},
{"code": "TH", "continent": "Asia", "name": "Thailand"},
{"code": "TJ", "continent": "Asia", "name": "Tajikistan"},
{"code": "TM", "continent": "Asia", "name": "Turkmenistan"},
{"code": "TN", "continent": "Africa", "name": "Tunisia"},
{"code": "TO", "continent": "Oceania", "name": "Tonga"},
{"code": "TR", "continent": "Asia", "name": "Turkey"},
{"code": "TT", "continent": "North America", "name": "Trinidad and Tobago"},
{"code": "TV", "continent": "Oceania", "name": "Tuvalu"},
{"code": "TZ", "continent": "Africa", "name": "Tanzania"},
{"code": "UA", "continent": "Europe", "name": "Ukraine"},
{"code": "UG", "continent": "Africa", "name": "Uganda"},
{"code": "US", "continent": "North America", "name": "United States"},
{"code": "UY", "continent": "South America", "name": "Uruguay"},
{"code": "UZ", "continent": "Asia", "name": "Uzbekistan"},
{"code": "VA", "continent": "Europe", "name": "Vatican City"},
{"code": "VE", "continent": "South America", "name": "Venezuela"},
{"code": "VN", "continent": "Asia", "name": "Vietnam"},
{"code": "VU", "continent": "Oceania", "name": "Vanuatu"},
{"code": "YE", "continent": "Asia", "name": "Yemen"},
{"code": "ZM", "continent": "Africa", "name": "Zambia"},
{"code": "ZW", "continent": "Africa", "name": "Zimbabwe"},
{"code": "DZ", "continent": "Africa", "name": "Algeria"},
{"code": "BA", "continent": "Europe", "name": "Bosnia and Herzegovina"},
{"code": "KH", "continent": "Asia", "name": "Cambodia"},
{"code": "CF", "continent": "Africa", "name": "Central African Republic"},
{"code": "TD", "continent": "Africa", "name": "Chad"},
{"code": "KM", "continent": "Africa", "name": "Comoros"},
{"code": "HR", "continent": "Europe", "name": "Croatia"},
{"code": "TL", "continent": "Asia", "name": "East Timor"},
{"code": "SV", "continent": "North America", "name": "El Salvador"},
{"code": "GQ", "continent": "Africa", "name": "Equatorial Guinea"},
{"code": "GD", "continent": "North America", "name": "Grenada"},
{"code": "KZ", "continent": "Asia", "name": "Kazakhstan"},
{"code": "LA", "continent": "Asia", "name": "Laos"},
{"code": "FM", "continent": "Oceania", "name": "Federated States of Micronesia"},
{"code": "MD", "continent": "Europe", "name": "Moldova"},
{"code": "MC", "continent": "Europe", "name": "Monaco"},
{"code": "ME", "continent": "Europe", "name": "Montenegro"},
{"code": "MA", "continent": "Africa", "name": "Morocco"},
{"code": "KN", "continent": "North America", "name": "Saint Kitts and Nevis"},
{"code": "LC", "continent": "North America", "name": "Saint Lucia"},
{"code": "VC", "continent": "North America", "name": "Saint Vincent and the Grenadines"},
{"code": "WS", "continent": "Oceania", "name": "Samoa"},
{"code": "RS", "continent": "Europe", "name": "Serbia"},
{"code": "ZA", "continent": "Africa", "name": "South Africa"},
{"code": "ES", "continent": "Europe", "name": "Spain"},
{"code": "LK", "continent": "Asia", "name": "Sri Lanka"},
{"code": "SZ", "continent": "Africa", "name": "Swaziland"},
{"code": "CH", "continent": "Europe", "name": "Switzerland"},
{"code": "AE", "continent": "Asia", "name": "United Arab Emirates"},
{"code": "GB", "continent": "Europe", "name": "United Kingdom"},
{"code": "TW", "continent": "Asia", "name": "Taiwan"},
{"code": "AW", "continent": "North America", "name": "Aruba"},
{"code": "FO", "continent": "Europe", "name": "Faroe Islands"},
{"code": "GI", "continent": "Europe", "name": "Gibraltar"},
{"code": "GU", "continent": "Oceania", "name": "Guam"},
{"code": "HK", "continent": "Asia", "name": "Hong Kong"},
{"code": "HT", "continent": "North America", "name": "Haiti"},
{"code": "IM", "continent": "Europe", "name": "Isle of Man"},
{"code": "JE", "continent": "Europe", "name": "Jersey"},
{"code": "KY", "continent": "North America", "name": "Cayman Islands"},
{"code": "MP", "continent": "Oceania", "name": "Northern Mariana Islands"},
{"code": "NC", "continent": "Oceania", "name": "New Caledonia"},
{"code": "PF", "continent": "Oceania", "name": "French Polynesia"},
{"code": "PR", "continent": "South America", "name": "Puerto Rico"},
{"code": "VI", "continent": "North America", "name": "US Virgin Islands"},
{"code": "YT", "continent": "Africa", "name": "Mayotte"},
]
strContinent = None
if strCountryName != None:
strCountryName = unicode(strCountryName.lower().strip())
for country in countries:
if strCountryName == unicode(country["name"].lower().strip()):
strContinent = country["continent"]
return strContinent | bsd-3-clause | -676,311,704,294,043,800 | 57.577947 | 170 | 0.555873 | false |
tcyb/nextgen4b | nextgen4b/process/sites.py | 1 | 2110 | from Bio import SeqIO
import yaml
import sys
import os
def replace_deletions(word, seq, idxs, del_letter='d'):
"""
Replace any '-' in word with del_letter if the nucleotides next to it in
seq are not '-'.
"""
new_word = [c for c in word]
for i, letter in enumerate(word):
# assume we're not looking at the start or end of sequence
idx = idxs[i]
        assert 0 < idx < len(seq) - 1
if letter == '-':
if seq[idx-1] != '-' and seq[idx+1] != '-':
new_word[i] = del_letter
return ''.join(new_word)
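# Illustrative example (hypothetical values):
#   replace_deletions('C-G', 'AC-GT', [1, 2, 3]) -> 'CdG'
# position 2 of the full sequence is a '-' flanked by 'C' and 'G', so it is marked
# as a real deletion ('d') rather than left as an alignment gap.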
def get_positions(f_name, sites, keep_dashes=True, mark_deletions=False):
"""
Reads in a fasta file of sequences (usually produced by nextgen_main.py)
at location f_name, and pulls out the bases at the (0-start) indices in
sites.
Input:
- f_name: str
- sites: list (ints)
- keep_dashes: bool
- mark_deletions: bool
Output:
- words: list
Options:
keep_dashes: if this is false, get_positions will discard any words with a
dash in them (generally denoting a deletion)
mark_deletions: if this is true, deletions (dashes flanked by non-dashes on
both sides) will be marked (with a 'd', but this should be
customizable?)
"""
words = []
with open(f_name) as f:
seq_iter = SeqIO.parse(f, 'fasta')
for s in seq_iter:
selected_letters = ''.join([str(s.seq[i]) for i in sites])
if '-' not in selected_letters:
words.append(selected_letters)
elif keep_dashes:
if mark_deletions:
words.append(replace_deletions(selected_letters, s, sites))
else:
words.append(selected_letters)
return words
if __name__ == '__main__':
in_name = sys.argv[1]
out_name = sys.argv[2]
sites = [int(x) for x in sys.argv[3:]]
words = get_positions(in_name, sites, keep_dashes=True)
with open(out_name, 'w') as of:
for word in words:
of.write('%s\n' % word) | mit | 9,148,165,284,312,616,000 | 29.157143 | 79 | 0.569668 | false |
DmitrySPetrov/simulation_g11 | l07/21.py | 1 | 1146 | # Task #2:
# Build a dictionary whose keys are the squares of numbers taken with a step
# of 0.01, and whose values are the cube root of the key
#
# Solution variant #1, using a for loop
#
# =!!= Run this with Python 3 =!!=
# The dictionary will be printed with the pprint function
from pprint import pprint
# Number of elements
N = 50
# Create an empty dictionary
A = {}
# Fill the dictionary in a loop
for i in range( N ):
key = ( i * 0.01 )**2
A[ key ] = key**(1/3)
# Print the dictionary
pprint( A )
# Notes:
# 1) range(N) returns the sequence 0, 1, ..., N-1
# 2) the for loop above works as follows:
#    the statements inside the loop body are executed for every i = 0, 1 ... N-1
# 3) the operator a**b returns the number a raised to the power b
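# Solution variant #2 (sketch): the same dictionary built with a dict comprehension;
# the assert checks that it matches the loop-built dictionary A.
B = {(i * 0.01) ** 2: ((i * 0.01) ** 2) ** (1 / 3) for i in range(N)}
assert B == A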
| mit | -4,652,178,838,154,709,000 | 22.741935 | 78 | 0.69837 | false |
eikonomega/file-comparison-panda | file_comparison_panda/file_comparison_panda.py | 1 | 5939 | """
The file_comparison module exists to easily compare the contents of two
files. The functionality of this module is currently limited to CSV files.
"""
import csv
from file_comparison_exceptions import (
UnsupportedFileType, FileDoesNotExist, PermissionDeniedOnFile)
from os import unsetenv
SUPPORTED_FILE_TYPES = ['csv']
class FileComparisonPanda(object):
"""
Compares the data in two files and provides matching and unique
records.
"""
def __init__(
self, file_path_1, file_path_2):
"""
Verify that constructor arguments are actually files and
of supported types. Perform routine object
initialization tasks.
Args:
file_path_1 (str): Filepath of first file for comparison.
file_path_2 (str): Filepath of second file for comparison.
Raises:
IOError: When one of the files identified by the parameters
doesn't exist or is inaccessible.
NotImplementedError: When one of the files being compared has
a non-supported file extension.
"""
self._unique_records = dict()
self._matching_records = list()
self.file_one = file_path_1
self.file_two = file_path_2
@property
def file_one(self):
return self._file_one
@file_one.setter
def file_one(self, file_path):
FileComparisonPanda._verify_acceptable_file_extensions(
[file_path], SUPPORTED_FILE_TYPES)
FileComparisonPanda._verify_file_accessibility(file_path)
self._file_one = file_path
self._reset_file_comparison_data()
@file_one.deleter
def file_one(self):
self._file_one = None
@property
def file_two(self):
return self._file_two
@file_two.setter
def file_two(self, file_path):
FileComparisonPanda._verify_acceptable_file_extensions(
[file_path], SUPPORTED_FILE_TYPES)
FileComparisonPanda._verify_file_accessibility(file_path)
self._file_two = file_path
self._reset_file_comparison_data()
@file_two.deleter
def file_two(self):
self._file_two = None
@staticmethod
def _verify_acceptable_file_extensions(
list_of_filenames, list_of_extensions):
"""
Determine if every file in list_of_files has one of the extensions
in list_of_extensions. If so, return True. Otherwise, return False.
Caller is responsible to provide valid filenames.
"""
for filename in list_of_filenames:
filename_parts = filename.partition('.')
if filename_parts[2] not in list_of_extensions:
raise UnsupportedFileType(
"One of the file paths provided to FileComparisonPanda() "
"references an unsupported file type. The following "
"file types are supported: {}".format(SUPPORTED_FILE_TYPES))
@staticmethod
def _verify_file_accessibility(file_path):
try:
file_being_verified = open(file_path, 'rU')
except IOError as error:
if error.errno == 2:
raise FileDoesNotExist(
"One of the file paths provided to FileComparisonPanda() "
"is invalid. Verify that '{}' exists".format(
error.filename))
elif error.errno == 13:
raise PermissionDeniedOnFile(
"One of the file paths provided to FileComparisonPanda() "
"is not accessible. Verify that '{}' is readable "
"by the user running the program".format(
error.filename))
raise
else:
file_being_verified.close()
def _reset_file_comparison_data(self):
self._unique_records = dict()
self._matching_records = list()
# print self._unique_records
# print self._matching_records
def _compare_files(self):
"""
Identify unique and matching records from self._file_one and
self.file_two using various set operations.
"""
with open(self._file_one, 'rU') as file_one:
file_one_records = set(
FileComparisonPanda._load_file_into_memory(file_one))
with open(self._file_two, 'rU') as file_two:
file_two_records = set(
FileComparisonPanda._load_file_into_memory(file_two))
self._matching_records.extend(
file_one_records.intersection(file_two_records))
self._unique_records['file_one'] = list(
file_one_records.difference(file_two_records))
self._unique_records['file_two'] = list(
file_two_records.difference(file_one_records))
@staticmethod
def _load_file_into_memory(file_object):
"""
Load the contents of a CSV file into memory for faster
performance.
IMPORTANT: This creates the potential for the program
to bomb out when it encounters memory limits.
"""
csv_reader = csv.reader(file_object)
records = [tuple(record) for record in csv_reader]
return records
@property
def unique_records(self):
"""
Returns:
            A dict containing two elements ['file_one', 'file_two'], each of
            which is a list of unique records found during _compare_files().
Raises:
AttributeError: When the method is called prior to _compare_files().
"""
if not self._unique_records:
self._compare_files()
return self._unique_records
@property
def matching_records(self):
"""
A list of records that were found in both files.
"""
if not self._matching_records:
self._compare_files()
return self._matching_records | mit | -991,835,789,728,919,200 | 30.595745 | 80 | 0.597407 | false |
jacquerie/invenio-grobid | setup.py | 1 | 4073 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module to interact with Grobid API for metadata extraction."""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
requirements = [
'Flask>=0.10.1',
'six>=1.7.2',
'Invenio>=2.0.3',
]
test_requirements = [
'pytest>=2.7.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'coverage>=3.7.1',
]
class PyTest(TestCommand):
"""PyTest Test."""
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
"""Init pytest."""
TestCommand.initialize_options(self)
self.pytest_args = []
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
config = ConfigParser()
config.read('pytest.ini')
self.pytest_args = config.get('pytest', 'addopts').split(' ')
def finalize_options(self):
"""Finalize pytest."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Run tests."""
# import here, cause outside the eggs aren't loaded
import pytest
import _pytest.config
pm = _pytest.config.get_plugin_manager()
pm.consider_setuptools_entrypoints()
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_grobid', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-grobid',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio',
license='GPLv2',
author='CERN',
author_email='[email protected]',
url='https://github.com/inspirehep/invenio-grobid',
packages=[
'invenio_grobid',
],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=requirements,
extras_require={
'docs': [
'Sphinx>=1.3',
'sphinx_rtd_theme>=0.1.7'
],
'tests': test_requirements
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Development Status :: 1 - Planning',
],
tests_require=test_requirements,
cmdclass={'test': PyTest},
)
| gpl-2.0 | -4,932,404,915,475,860,000 | 30.091603 | 76 | 0.632212 | false |
lych0317/CNBlogs_server | CNBlogs/Protocol/SearchBlogProtocol.py | 1 | 2678 | #!/usr/local/bin/python
# -*- coding:utf8 -*-
__author__ = 'liyc'
import urllib2
import re
from bs4 import BeautifulSoup
def search_blog_keyword_page(keyword, page="0"):
url = "http://zzk.cnblogs.com/s?t=b&dateMin=2013-01-01"
if keyword:
url = url + "&w=" + keyword
if page:
url = url + "&p=" + page
print url
req = urllib2.Request(url)
con = urllib2.urlopen(req)
doc = con.read()
con.close()
soup = BeautifulSoup(doc, 'html.parser')
searchItemArray = soup.find_all("div", attrs={"class": "searchItem"})
itemArray = []
for searchItem in searchItemArray:
item = {}
tag = searchItem.find(attrs={"class": "searchItemTitle"})
if tag:
href = tag.a.get("href")
pattern = re.compile("/")
match = pattern.split(href)[-1]
if match:
pattern = re.compile("\.")
match = pattern.split(match)[0]
if match:
pattern = re.compile("^\d*$")
match = pattern.match(match)
if match:
item["identifier"] = match.group()
else:
continue
item["link"] = href
tag = searchItem.find(attrs={"class": "searchItemTitle"})
if tag:
item["title"] = tag.a.text
tag = searchItem.find(attrs={"class": "searchCon"})
if tag:
item["summary"] = tag.text.strip()
tag = searchItem.find(attrs={"class": "searchItemInfo-userName"})
if tag:
author = {"uri": tag.a.get("href"), "name": tag.a.text, "avatar": ""}
item["author"] = author
tag = searchItem.find(attrs={"class": "searchItemInfo-publishDate"})
if tag:
item["publishDate"] = tag.text
item["updateDate"] = tag.text
pattern = re.compile("\d+")
tag = searchItem.find(attrs={"class": "searchItemInfo-good"})
if tag:
good = tag.text
match = pattern.search(good)
if match:
item["diggs"] = match.group()
tag = searchItem.find(attrs={"class": "searchItemInfo-comments"})
if tag:
comments = tag.text
match = pattern.search(comments)
if match:
item["comments"] = match.group()
tag = searchItem.find(attrs={"class": "searchItemInfo-views"})
if tag:
views = tag.text
match = pattern.search(views)
if match:
item["views"] = match.group()
itemArray.append(item)
return itemArray
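# Minimal manual test (the keyword is only an example; this performs a live request
# to zzk.cnblogs.com, so network access is required):
if __name__ == "__main__":
    for dicItem in search_blog_keyword_page(u"python", "1"):
        print dicItem.get("title"), dicItem.get("link")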
| apache-2.0 | -4,214,701,241,209,484,300 | 28.428571 | 81 | 0.509709 | false |
herereadthis/django_project | polls/admin.py | 1 | 1075 | from django.contrib import admin
from polls.models import Choice, Poll
# Register your models here.
# customize the admin form by creating a model admin object, then pass it
# as the second argument to admin.site.register()
# inline: you can do TabularInline (saves space) or StackedInline
class ChoiceInline(admin.TabularInline):
# Choice Model
model = Choice
    # show 3 extra blank Choice forms by default
extra = 3
class PollAdmin(admin.ModelAdmin):
fieldsets = [
(None,
{
'fields': ['question']
}),
('Date information',
{
'fields': ['pub_date'],
# makes the makes the fieldset auto-hide
'classes': ['collapse']
}),
]
# tells Django: choice objects are edited on the Poll admin page. By
# default, provide enough field for 3 choices.
inlines = [ChoiceInline]
list_display = ('question', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question']
admin.site.register(Poll, PollAdmin)
| mit | 1,060,910,523,673,767,200 | 28.054054 | 73 | 0.612093 | false |
dantkz/spatial-transformer-tensorflow | example_affine.py | 1 | 2113 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import AffineTransformer
import numpy as np
import scipy.misc
# Input image retrieved from:
# https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('data/cat.jpg')
im = im / 255.
im = im.astype('float32')
# input batch
batch_size = 4
batch = np.expand_dims(im, axis=0)
batch = np.tile(batch, [batch_size, 1, 1, 1])
# input placeholder
x = tf.placeholder(tf.float32, [batch_size, im.shape[0], im.shape[1], im.shape[2]])
# Let the output size of the affine transformer be quarter of the image size.
outsize = (int(im.shape[0]/4), int(im.shape[1]/4))
# Affine Transformation Layer
stl = AffineTransformer(outsize)
# Identity transformation parameters
initial = np.array([1.0, 0.0, 0.0,
0.0, 1.0, 0.0]).astype('float32')
initial = np.reshape(initial, [1, stl.param_dim])
# Run session
with tf.Session() as sess:
with tf.device("/cpu:0"):
with tf.variable_scope('spatial_transformer') as scope:
# Random jitter of the identity parameters
theta = initial + 0.1*tf.random_normal([batch_size, stl.param_dim])
result = stl.transform(x, theta)
sess.run(tf.global_variables_initializer())
result_ = sess.run(result, feed_dict={x: batch})
# save our result
for i in range(result_.shape[0]):
scipy.misc.imsave('affine' + str(i) + '.png', result_[i])
| apache-2.0 | -6,119,487,510,610,097,000 | 34.216667 | 83 | 0.684808 | false |
who-emro/meerkat_api | meerkat_api/config.py | 1 | 1303 | """
config.py
Configuration and settings
"""
from os import getenv
class Config(object):
DEBUG = True
TESTING = False
PRODUCTION = False
# Global stuff
SQLALCHEMY_DATABASE_URI = getenv("SQLALCHEMY_DATABASE_URI", 'postgresql+psycopg2://postgres:postgres@db/meerkat_db')
API_KEY = "test-api"
AUTH = {
'default': [['registered'], ['demo']]
}
SQLALCHEMY_TRACK_MODIFICATIONS = False
APPLICATION_ROOT = "/api"
PROPAGATE_EXCEPTIONS = True
BROKER_URL = 'amqp://guest@rabbit//'
CELERY_RESULT_BACKEND = 'rpc://guest@rabbit//'
CELERY_TASK_SERIALIZER = 'yaml'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json', 'yaml']
SENTRY_DNS = getenv('SENTRY_DNS', '')
INTERNAL_DEVICE_API_ROOT = getenv("MOB_API_ROOT", 'http://nginx/mob')
EXTERNAL_DEVICE_API_ROOT = '/mob'
SEND_LOGG_EVENTS = getenv("SEND_LOGG_EVENTS", False)
LOGGING_URL = getenv("LOGGING_URL", None)
LOGGING_SOURCE = getenv("LOGGING_SOURCE", "dev")
LOGGING_SOURCE_TYPE = "api"
LOGGING_IMPLEMENTATION = getenv("LOGGING_IMPLEMENTATION", "demo")
class Production(Config):
DEBUG = False
PRODUCTION = True
class Development(Config):
DEBUG = True
class Testing(Config):
DEBUG = False
TESTING = True
API_KEY = ''
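# Sketch of how a config class could be selected at runtime; the CONFIG_OBJECT
# environment variable name is an assumption, not part of the original module.
def get_config(name=None):
    classes = {"production": Production, "development": Development, "testing": Testing}
    return classes.get((name or getenv("CONFIG_OBJECT", "development")).lower(), Development)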
| mit | 3,249,643,469,962,440,700 | 25.591837 | 120 | 0.650038 | false |
brunoripa/NoseGAE | nosegae.py | 1 | 13539 | import os
import logging
import sys
import tempfile
from nose.plugins.base import Plugin
from nose.case import FunctionTestCase
# Solution from
# http://stackoverflow.com/questions/17583443/what-is-the-correct-way-to-share-package-version-with-setup-py-and-the-package
from pkg_resources import get_distribution, DistributionNotFound
try:
_dist = get_distribution('nosegae')
# Normalize case for Windows systems
dist_loc = os.path.normcase(_dist.location)
here = os.path.normcase(__file__)
if not here.startswith(os.path.join(dist_loc, 'nosegae')):
# not installed, but there is another version that *is*
raise DistributionNotFound
except DistributionNotFound:
__version__ = 'DEVELOPMENT'
else:
__version__ = _dist.version
logger = logging.getLogger(__name__)
class NoseGAE(Plugin):
"""Activate this plugin to run tests in Google App Engine dev environment. When the plugin is active,
Google App Engine dev stubs such as the datastore, memcache, taskqueue, and more can be made available.
"""
name = 'gae'
def options(self, parser, env=os.environ):
super(NoseGAE, self).options(parser, env)
parser.add_option(
'--gae-lib-root', default='/usr/local/google_appengine',
dest='gae_lib_root',
help='Set the path to the root directory of the Google '
'Application Engine installation')
parser.add_option(
'--gae-application', default=None, action='store', dest='gae_app',
help='Set the path to the GAE application '
'under test. Default is the nose `where` '
'directory (generally the cwd)')
parser.add_option(
'--gae-datastore', default=None, action='store', dest='gae_data',
help='Set the path to the GAE datastore to use in tests. '
'Note that when using an existing datastore directory, the '
'datastore will not be cleared before testing begins.')
def configure(self, options, config):
super(NoseGAE, self).configure(options, config)
if not self.enabled:
return
if sys.version_info[0:2] < (2, 7):
raise EnvironmentError(
"Python version must be 2.7 or greater, like the Google App Engine environment. "
"Tests are running with: %s" % sys.version)
try:
self._app_path = options.gae_app.split(',')
except AttributeError:
self._app_path = [config.workingDir]
self._data_path = options.gae_data or os.path.join(tempfile.gettempdir(),
'nosegae.sqlite3')
if options.gae_lib_root not in sys.path:
options.gae_lib_root = os.path.realpath(options.gae_lib_root)
sys.path.insert(0, options.gae_lib_root)
for path_ in self._app_path:
path_ = os.path.realpath(path_)
if not os.path.isdir(path_):
path_ = os.path.dirname(path_)
if path_ not in sys.path:
sys.path.append(path_)
if 'google' in sys.modules:
# make sure an egg (e.g. protobuf) is not cached
# with the wrong path:
reload(sys.modules['google'])
try:
import appengine_config
except ImportError:
pass
# TODO: this may need to happen after parsing your yaml files in
# The case of modules but I need to investigate further
import dev_appserver
dev_appserver.fix_sys_path() # add paths to libs specified in app.yaml, etc
# This file handles some OS compat settings. Most notably the `TZ` stuff
# to resolve https://github.com/Trii/NoseGAE/issues/14.
# It may need to be removed in the future if Google changes the functionality
import google.appengine.tools.os_compat
from google.appengine.tools.devappserver2 import application_configuration
# get the app id out of your app.yaml and stuff
self.configuration = application_configuration.ApplicationConfiguration(self._app_path)
os.environ['APPLICATION_ID'] = self.configuration.app_id
# simulate same environment as devappserver2
os.environ['CURRENT_VERSION_ID'] = self.configuration.modules[0].version_id
self.is_doctests = options.enable_plugin_doctest
# As of SDK 0.2.5 the dev_appserver.py aggressively adds some logging handlers.
# This removes the handlers but note that Nose will still capture logging and
# report it during failures. See Issue 25 for more info.
rootLogger = logging.getLogger()
for handler in rootLogger.handlers:
if isinstance(handler, logging.StreamHandler):
rootLogger.removeHandler(handler)
def startTest(self, test):
"""Initializes Testbed stubs based off of attributes of the executing test
allow tests to register and configure stubs by setting properties like
nosegae_<stub_name> and nosegae_<stub_name>_kwargs
Example
class MyTest(unittest.TestCase):
nosegae_datastore_v3 = True
nosegae_datastore_v3_kwargs = {
'datastore_file': '/tmp/nosegae.sqlite3,
'use_sqlite': True
}
def test_something(self):
entity = MyModel(name='NoseGAE')
entity.put()
self.assertNotNone(entity.key.id())
Args
:param test: The unittest.TestCase being run
:type test: unittest.TestCase
"""
from google.appengine.ext import testbed
self._add_missing_stubs(testbed)
self.testbed = testbed.Testbed()
self.testbed.activate()
# Give the test access to the active testbed
the_test = test.test
if isinstance(the_test, FunctionTestCase):
the_test = the_test.test
the_test.testbed = self.testbed
# Fix - no other way to inject app_id using NoseGAE, right ?
custom_app_id = getattr(test.test, "CUSTOM_APP_ID", None)
setup_env_done = False
for stub_name, stub_init in testbed.INIT_STUB_METHOD_NAMES.iteritems():
if not getattr(the_test, 'nosegae_%s' % stub_name, False):
continue
stub_kwargs = getattr(the_test, 'nosegae_%s_kwargs' % stub_name, {})
if stub_name == testbed.TASKQUEUE_SERVICE_NAME:
self._init_taskqueue_stub(**stub_kwargs)
elif stub_name == testbed.DATASTORE_SERVICE_NAME:
if not self.testbed.get_stub(testbed.MEMCACHE_SERVICE_NAME):
# ndb requires memcache so enable it as well as the datastore_v3
self.testbed.init_memcache_stub()
self._init_datastore_v3_stub(**stub_kwargs)
elif stub_name == testbed.USER_SERVICE_NAME:
if custom_app_id:
self.testbed.setup_env(overwrite=True,
USER_ID=stub_kwargs.pop('USER_ID', 'testuser'),
USER_EMAIL=stub_kwargs.pop('USER_EMAIL', '[email protected]'),
USER_IS_ADMIN=stub_kwargs.pop('USER_IS_ADMIN', '1'),
app_id=custom_app_id)
else:
self.testbed.setup_env(overwrite=True,
USER_ID=stub_kwargs.pop('USER_ID', 'testuser'),
USER_EMAIL=stub_kwargs.pop('USER_EMAIL', '[email protected]'),
USER_IS_ADMIN=stub_kwargs.pop('USER_IS_ADMIN', '1'))
setup_env_done = True
if not setup_env_done and custom_app_id:
self.testbed.setup_env(overwrite=True, app_id=custom_app_id)
getattr(self.testbed, stub_init)(**stub_kwargs)
if self.is_doctests:
self._doctest_compat(the_test)
self.the_test = the_test
def stopTest(self, test):
self.testbed.deactivate()
del self.the_test.testbed
del self.the_test
def _doctest_compat(self, the_test):
"""Enable compatibility with doctests by setting the current testbed into the doctest scope"""
try:
the_test._dt_test.globs['testbed'] = self.testbed
except AttributeError:
# not a nose.plugins.doctests.DocTestCase?
pass
def _add_missing_stubs(self, testbed):
"""Monkeypatch the testbed for stubs that do not have an init method yet"""
if not hasattr(testbed, 'PROSPECTIVE_SEARCH_SERVICE_NAME'):
from google.appengine.api.prospective_search.prospective_search_stub import ProspectiveSearchStub
testbed.PROSPECTIVE_SEARCH_SERVICE_NAME = 'matcher'
testbed.INIT_STUB_METHOD_NAMES.update({
testbed.PROSPECTIVE_SEARCH_SERVICE_NAME: 'init_prospective_search_stub'
})
def init_prospective_search_stub(self, enable=True, data_file=None):
"""Workaround to avoid prospective search complain until there is a proper testbed stub
http://stackoverflow.com/questions/16026703/testbed-stub-for-google-app-engine-prospective-search
Args:
:param self: The Testbed instance.
:param enable: True if the fake service should be enabled, False if real
service should be disabled.
"""
if not enable:
self._disable_stub(testbed.PROSPECTIVE_SEARCH_SERVICE_NAME)
return
stub = ProspectiveSearchStub(
prospective_search_path=data_file,
taskqueue_stub=self.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
self._register_stub(testbed.PROSPECTIVE_SEARCH_SERVICE_NAME, stub)
testbed.Testbed.init_prospective_search_stub = init_prospective_search_stub
def _init_taskqueue_stub(self, **stub_kwargs):
"""Initializes the taskqueue stub using nosegae config magic"""
task_args = {}
# root_path is required so the stub can find 'queue.yaml' or 'queue.yml'
if 'root_path' not in stub_kwargs:
for p in self._app_path:
# support --gae-application values that may be a .yaml file
dir_ = os.path.dirname(p) if os.path.isfile(p) else p
if os.path.isfile(os.path.join(dir_, 'queue.yaml')) or \
os.path.isfile(os.path.join(dir_, 'queue.yml')):
task_args['root_path'] = dir_
break
task_args.update(stub_kwargs)
self.testbed.init_taskqueue_stub(**task_args)
def _init_datastore_v3_stub(self, **stub_kwargs):
"""Initializes the datastore stub using nosegae config magic"""
task_args = dict(datastore_file=self._data_path)
task_args.update(stub_kwargs)
self.testbed.init_datastore_v3_stub(**task_args)
def _init_user_stub(self, **stub_kwargs):
"""Initializes the user stub using nosegae config magic"""
# do a little dance to keep the same kwargs for multiple tests in the same class
# because the user stub will barf if you pass these items into it
# stub = user_service_stub.UserServiceStub(**stub_kw_args)
# TypeError: __init__() got an unexpected keyword argument 'USER_IS_ADMIN'
task_args = stub_kwargs.copy()
self.testbed.setup_env(overwrite=True,
USER_ID=task_args.pop('USER_ID', 'testuser'),
USER_EMAIL=task_args.pop('USER_EMAIL', '[email protected]'),
USER_IS_ADMIN=task_args.pop('USER_IS_ADMIN', '1'))
self.testbed.init_user_stub(**task_args)
def _init_modules_stub(self, **_):
"""Initializes the modules stub based off of your current yaml files
Implements solution from
http://stackoverflow.com/questions/28166558/invalidmoduleerror-when-using-testbed-to-unit-test-google-app-engine
"""
from google.appengine.api import request_info
# edit all_versions per modules & versions thereof needing tests
all_versions = {} # {'default': [1], 'andsome': [2], 'others': [1]}
def_versions = {} # {m: all_versions[m][0] for m in all_versions}
m2h = {} # {m: {def_versions[m]: 'localhost:8080'} for m in def_versions}
for module in self.configuration.modules:
module_name = module._module_name or 'default'
module_version = module._version or '1'
all_versions[module_name] = [module_version]
def_versions[module_name] = module_version
m2h[module_name] = {module_version: 'localhost:8080'}
request_info._local_dispatcher = request_info._LocalFakeDispatcher(
module_names=list(all_versions),
module_name_to_versions=all_versions,
module_name_to_default_versions=def_versions,
module_name_to_version_to_hostname=m2h)
self.testbed.init_modules_stub()
def _init_stub(self, stub_init, **stub_kwargs):
"""Initializes all other stubs for consistency's sake"""
getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs)
| bsd-2-clause | 754,867,155,543,483,600 | 44.432886 | 124 | 0.601152 | false |
google/turbinia | turbinia/workers/fsstat.py | 1 | 1589 | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task to run fsstat on disk partitions."""
from __future__ import unicode_literals
import os
from turbinia import TurbiniaException
from turbinia.workers import TurbiniaTask
from turbinia.evidence import EvidenceState as state
from turbinia.evidence import ReportText
class FsstatTask(TurbiniaTask):
REQUIRED_STATES = [state.ATTACHED]
def run(self, evidence, result):
"""Task to execute fsstat.
Args:
evidence (Evidence object): The evidence we will process.
result (TurbiniaTaskResult): The object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
fsstat_output = os.path.join(self.output_dir, 'fsstat.txt')
output_evidence = ReportText(source_path=fsstat_output)
cmd = ['sudo', 'fsstat', evidence.device_path]
result.log('Running fsstat as [{0!s}]'.format(cmd))
self.execute(
cmd, result, stdout_file=fsstat_output, new_evidence=[output_evidence],
close=True)
return result | apache-2.0 | -2,246,638,979,258,569,000 | 32.125 | 79 | 0.723096 | false |
bitmovin/bitmovin-python | bitmovin/resources/models/outputs/generic_s3_output.py | 1 | 2304 | from bitmovin.utils import Serializable
from bitmovin.resources.enums import S3SignatureVersion
from bitmovin.errors import InvalidTypeError  # assumed import path for InvalidTypeError raised below
from . import AbstractOutput
class GenericS3Output(AbstractOutput, Serializable):
def __init__(self, access_key, secret_key, bucket_name, host, port=None, signature_version=None, ssl=None, id_=None, custom_data=None,
name=None, description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._signatureVersion = None
self.accessKey = access_key
self.secretKey = secret_key
self.bucketName = bucket_name
self.host = host
self.port = port
self.signatureVersion = signature_version
self.ssl = ssl
@property
def signatureVersion(self):
return self._signatureVersion
@signatureVersion.setter
def signatureVersion(self, new_sigver):
if new_sigver is None:
return
if isinstance(new_sigver, str):
self._signatureVersion = new_sigver
elif isinstance(new_sigver, S3SignatureVersion):
self._signatureVersion = new_sigver.value
else:
raise InvalidTypeError(
                'Invalid type {} for signatureVersion: must be either str or S3SignatureVersion!'.format(type(new_sigver)))
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
bucket_name = json_object['bucketName']
access_key = json_object.get('accessKey')
secret_key = json_object.get('secretKey')
name = json_object.get('name')
description = json_object.get('description')
host = json_object.get('host')
port = json_object.get('port')
signature_version = json_object.get('signatureVersion')
ssl = json_object.get('ssl')
generic_s3_output = GenericS3Output(
access_key=access_key, secret_key=secret_key, bucket_name=bucket_name, host=host, port=port, signature_version=signature_version,
ssl=ssl, id_=id_, name=name, description=description)
return generic_s3_output
def serialize(self):
serialized = super().serialize()
serialized['signatureVersion'] = self.signatureVersion
return serialized
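    # Usage sketch (credentials and endpoint are placeholders):
    #   output = GenericS3Output(access_key='MY_ACCESS_KEY', secret_key='MY_SECRET',
    #                            bucket_name='my-bucket', host='s3.example.org',
    #                            port=443, ssl=True, name='my generic s3 output')
    # signature_version may be passed either as a str or as an S3SignatureVersion member.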
| unlicense | -5,036,721,332,386,152,000 | 38.724138 | 141 | 0.648003 | false |
XiangyuQin/WeChat-Controller | code/testdecode.py | 1 | 18959 | # -*- coding:utf-8 -*-
#!/usr/bin/env python
__all__ = ['weChatController']
import cookielib
import urllib2
import urllib
import urlparse
import json
import poster
import hashlib
import time
import random
import sys
import os
import traceback
from cStringIO import StringIO
import tornado
import tornado.escape
from bs4 import BeautifulSoup
import uuid
#cookie = cookielib.MozillaCookieJar()
#_opener=poster.streaminghttp.register_openers()
#_openerreload(sys)
reload(sys)
sys.setdefaultencoding( "utf-8" )
class WeChatControllerException(Exception):
pass
class WeChatController(object):
def __init__(self, user='tool', redisdb=None, force=False, config=None):
"""
        Initialize the WeChat Official Account (public platform) controller.
"""
self.test_appmsg = {
'author':'test',
'title':'test_merge2',
'sourceurl':'www.baidu.com',
'cover':'/home/pythonDir/cover.jpg',
'digest':"你好",
'content':'<p style="line-height: 25.6px; white-space: normal;"><em><span style="word-wrap: break-word; font-weight: 700;">表白,'\
'相信这个动作是每个人一生当中都会触发的一个行为,' \
'大部分人认为表白是跟女孩确定恋爱关系非常有效的一种方法,有耐心和妹子建立深层次的联系感并对妹子产生了吸引力,妹子接 ' \
'受的概念就会大增其实,盲目的表白你会发现往往到最后没有任何效果。</span></em></p><p style="line-height: 25.6px; white-spac' \
'e: normal;"><span style="line-height: 1.6;"> 有个朋友,做个一个实验,并把它录制成了一个视频剪辑,内容大概是这样的,他收集' \
'了现实生活中将近50个男人的表白现场实录,有的是在人民广场这样人流量聚焦的地区,有的像电影里那样是在很有格调的西餐厅,有的是在酒吧,有的是在朋 ' \
'友聚会,更有夸张一点的,你们可能都想不到,是在足球比赛的现场。</span></p><p style="line-height: 25.6px; white-space: normal;">最后的结果出来了,成功率几乎 '\
'只有5%不到,对的,你没看错,就是这么低。很多兄弟觉得不可思议,怎么会这样,和电视电影里的完全不一样啊,呵呵,因为这才是现实。为什么女人的反应都几乎是拒绝,完全不顾及' \
'男人的面子,也完全没有被感动的赶脚。</p><p style="line-height: 25.6px; white-space: normal;">那么我来告诉兄弟们,问题出在哪,因为这种情况下,女人会本能的产生一种压迫' \
'感,或者说是不安全感,她们会条件反射式的去拒绝。</p><p style="line-height: 25.6px; white-space: normal;">因为在进化学来看,远古时代的人类基因,仍然在现代人的基因里有' \
'保留,在古代的女人,她们一旦选定一个男人,她的生命就跟这个男人绑定在了一起,换句话说,如果这个男人不能提供足够的食物,这个女人在怀孕期间就会被饿死。</p><p style="lin' \
'e-height: 25.6px; white-space: normal;">这种选错了对象就要付出生命代价的基因一直延续至今,所以,女人一旦面对男人表白这种事情的时候,就会自动切换到理性思考模式,接受面临的是风险,而拒绝是最' \
'保险的做法,女人不傻,所以,她们会选择拒绝就不难理解了。<span style="line-height: 1.6;">现在兄弟们懂了这个道理,那有的兄弟要说了,既然这样,不去表白,怎么追到女人' \
',难道让女人对你表白么?恩,让女人表白也不是不可能的,我们家的方法就可以让你做到,让女人倒追你,我们有的是方法。</span></p><p style="line-height: 25.6px; white-s' \
'pace: normal;">这就是我们家自主开发的男神模式,它可以让你和女人的互动交流之后,让女人喜欢上你,让女人主动对你示好,对你表白。至于该怎么做,只需要关注我们' \
'的微信公众号,那里面有干货会告诉你。</p><p><br/></p>',
}
self.lastMsg = None
self._opener = None
self.user = user
self.key = "mp_%s" % user
self.ticket = None
self.ticket_id = None
self.token = None
self.email = "[email protected]"
self.password = "b3ca2251f5f48978f9e9c32aeb5fde26"
self.msgType = {'text': 1, 'image': 2, 'audio': 3, 'news': 10, 'video': 15}
self.login(force=force)
#print self.upload_img(img_url='/home/pythonDir/imagestest3.jpg')
self.add_appmsg(self.test_appmsg)
def _set_opener(self):
self._opener = poster.streaminghttp.register_openers()
self._opener.addheaders = [
('Accept', 'application/json, text/javascript, */*; q=0.01'),
('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8'),
('Referer', 'https://mp.weixin.qq.com'),
('Cache-Control', 'max-age=0'),
('Connection', 'keep-alive'),
('Host', 'mp.weixin.qq.com'),
('Origin', 'https://mp.weixin.qq.com'),
('X-Requested-With', 'XMLHttpRequest'),
('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36')
]
def login(self, force=False):
"""
        Log in to the official account platform; raises an exception on failure. The token changes frequently, and after a successful login you must pause briefly or the cookie will be cleared.
"""
email = self.email
password = self.password
cookie = cookielib.MozillaCookieJar()
self._set_opener()
if force:
self._opener.add_handler(urllib2.HTTPCookieProcessor(cookie))
url = "https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN"
body = {'username': email, 'pwd': password, 'imgcode': '', 'f': 'json'}
req = self._opener.open(url, urllib.urlencode(body), timeout=30)
resp = req.read()
msg = json.loads(resp)
print msg
if 'base_resp' in msg and msg['base_resp']['ret'] == 0:
self.token = msg['redirect_url'].split("=")[-1]
print "token:%s" %self.token
else:
print 'login fail'
time.sleep(1)
else:
try:
print "force:%s" %force
except:
self.login(force=True)
def _ensure_login(self):
        if self.token is not None:
self.check_notice()
def check_notice(self):
"""
        Fetch system notifications.
"""
url = "https://mp.weixin.qq.com/cgi-bin/sysnotify"
data = {
'count': 5,
'begin': 0,
'ajax': 1,
'random': random.random()
}
ret = self._send_request(url, data, method="GET")
return ret
def _get_ticket(self):
url = "https://mp.weixin.qq.com/cgi-bin/message"
data = {
't': 'message/list',
'count': 20,
'day': 0
}
ret = self._send_request(url, data, method="GET")
if ret:
ticket_id = ret['user_info']['user_name']
ticket = ret['base_resp']['media_ticket']
print ticket
print ticket_id
return ticket, ticket_id
else:
return None, None
def _send_request(self, url, data={}, headers={}, method='POST', jsonformat=True):
        msg = None
        for i in xrange(3):
try:
if method == "POST":
print isinstance(data, dict)
if(isinstance(data, dict)):
data.update({'f': 'json',
'lang': 'zh_CN',
'ajax': 1,
'token': self.token,
'random': random.random()})
if 't' not in data.keys():
data.update({'t': 'ajax-response'})
resp = self._opener.open(url, urllib.urlencode(data))
else:
req = urllib2.Request(url, data, headers)
resp = urllib2.urlopen(req)
else:
data.update({'token': self.token, 'f': 'json', 'lang': 'zh_CN'})
resp = self._opener.open(url + "?" + urllib.urlencode(data))
if resp.getcode() in [200, 302, 304]:
msg = resp.read()
break
except:
print traceback.format_exc()
time.sleep(1)
if not msg:
return False
self.lastMsg = msg
        # For non-JSON responses, return msg as-is
print 'msg', msg, type(msg)
if jsonformat:
try:
msg = json.loads(msg)
except:
import chardet
msg = json.loads( msg.decode( chardet.detect(msg)['encoding'] ) )
else:
return msg
        # The response is JSON; check the return code (currently only two layouts occur)
if 'base_resp' in msg:
ret = int(msg['base_resp']['ret'])
else:
ret = int(msg['ret'])
        # Act on the returned code
if ret == 0:
return msg
else:
time.sleep(1)
if ret == -3:
                # token expired, log in again
print "token expired, relogin"
self.login(force=True)
return self._send_request(url, data, headers, method, jsonformat)
elif ret == -18:
                # ticket expired, fetch a new one
                self._get_ticket()
print "ticket expired,reget"
return self._send_request(url, data, headers, method, jsonformat)
else:
#error
print str(msg)
return False
def upload_img(self, img_url=""):
self._ensure_login()
ticket, ticket_id = self._get_ticket()
if not ticket:
return False
url = 'https://mp.weixin.qq.com/cgi-bin/filetransfer?action=upload_material&f=json' \
'&writetype=doublewrite&groupid=1&ticket_id={0}&ticket={1}&token={2}&lang=zh_CN'.format(
ticket_id,
ticket,
self.token)
params = {'file': open(img_url, "rb")}
data, headers = poster.encode.multipart_encode(params)
headers.update({
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
'Connection': 'keep-alive',
'Host': 'mp.weixin.qq.com',
'Origin': 'https://mp.weixin.qq.com',
'Referer': 'https://mp.weixin.qq.com/cgi-bin/filepage?type=2&begin=0&count=10&t=media/list&token=%s&lang=zh_CN' % self.token,
})
ret = self._send_request(url, data, headers)
if ret:
return ret['content']
else:
print ret
return False
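    # Added note (commentary, not in the original file): the value returned above on
    # success is ret['content'], which packet_appmsg() later stores as the article's
    # file_id (cover image id) before the article is submitted.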
def merge_appmsg_info(self, appMsg, index):
"""
        Assemble an article's fields into the payload format used when submitting it.
"""
soup = BeautifulSoup(appMsg.get('content', ''), 'html5lib')
imgs = soup.find_all('img')
for img in imgs:
url = img.get('src', img.get('data-src'))
if not url:
continue
if urlparse.urlparse(url).netloc == 'mmbiz.qlogo.cn':
continue
data = urllib2.urlopen(url).read()
im = Image.open(StringIO(data))
width = im.size[0]
ratio = im.size[1]/float(width)
filename = '/tmp/%s.%s' % (uuid.uuid4().hex, im.format.lower())
with open(filename, 'wb') as fp:
fp.write(data)
            src = self.upload_img(filename)
os.remove(filename)
if src:
img.attrs['src'] = src
img.attrs['data-src'] = src
img.attrs['data-ratio'] = ratio
img.attrs['data-w'] = width
appMsg['content'] = soup.body.renderContents()
        # When fetched via get_appmsg, the returned fields are named file_id and source_url rather than fileid and sourceurl
return {
'title%d' % index: tornado.escape.xhtml_unescape(appMsg.get('title', '')),
'content%d' % index: tornado.escape.xhtml_unescape(appMsg.get('content', '')),
'digest%d' % index: tornado.escape.xhtml_unescape(appMsg.get('digest', '')),
'author%d' % index: tornado.escape.xhtml_unescape(appMsg.get('author', '')),
'fileid%d' % index: appMsg.get('file_id', appMsg.get('fileid', '')),
'sourceurl%d' % index: appMsg.get('source_url', appMsg.get('sourceurl', '')),
'show_cover_pic%d' % index: appMsg.get('show_cover_pic', 0),
'shortvideofileid%d' % index: appMsg.get('shortvideofileid', ''),
'copyright_type%d' % index: appMsg.get('copyright_type', 0),
'can_reward%d' % index: appMsg.get('can_reward', 0),
'reward_wording%d' % index: appMsg.get('reward_wording', ''),
'releasefirst%d' % index: appMsg.get('releasefirst', 0),
'can_reward%d' % index: appMsg.get('can_reward', 0),
'reward_wording%d' % index: appMsg.get('reward_wording', ''),
'reprint_permit_type%d' % index: appMsg.get('reprint_permit_type', 0),
'original_article_type%d' % index: appMsg.get('original_article_type', ''),
'need_open_comment%d' % index: appMsg.get('need_open_comment', 1),
}
def packet_appmsg(self, appMsgs):
"""
        Pack the list of articles into request parameters.
"""
ret = {}
if isinstance(appMsgs, dict):
appMsgs = [appMsgs]
for index in xrange(len(appMsgs)):
appMsg = appMsgs[index]
if not appMsg.get('file_id', None):
if not (appMsg.get('title') and appMsg.get('content') and appMsg.get('cover')):
                    print "Each article needs a title, content and a cover image"
continue
file_id = self.upload_img(appMsg['cover'])
appMsg['file_id'] = file_id
ret.update(self.merge_appmsg_info(appMsg, index))
return ret
def get_appmsg(self, AppMsgId, isMul=0):
"""
        Fetch the article whose id is AppMsgId.
        isMul indicates whether it is a multi-article message.
        Returns the article info in appMsg format.
"""
url = "https://mp.weixin.qq.com/cgi-bin/appmsg"
data = {
'appmsgid': AppMsgId,
'isMul': isMul,
'type': 10,
't': 'media/appmsg_edit',
'action': 'edit'
}
ret = self._send_request(url, data, method="GET")
if ret:
app_msgs = json.loads(ret['app_msg_info'])['item'][0]['multi_item']
return app_msgs
def add_appmsg(self, appMsgs, AppMsgId=''):
"""
        If AppMsgId is empty a new article is created; otherwise the existing article is saved after preview.
        appMsgs is a list of dicts; each dict holds the cover image (required), title, content, digest,
        show_cover flag, author and sourceurl.
        Returns the id of the resulting article.
"""
url = 'https://mp.weixin.qq.com/cgi-bin/operate_appmsg'
data = {
'AppMsgId': AppMsgId,
'count': len(appMsgs) if isinstance(appMsgs, list) else 1,
'sub': 'update' if AppMsgId else 'create',
'type': 10
}
data.update(self.packet_appmsg(appMsgs))
ret = self._send_request(url, data)
if ret:
if AppMsgId:
return AppMsgId
else:
msgIds = self.get_msg_Ids()
if msgIds and len(msgIds):
return msgIds[0]
return False
def del_appmsg(self, AppMsgId):
"""
        Delete an article by id.
"""
url = 'https://mp.weixin.qq.com/cgi-bin/operate_appmsg'
data = {
'AppMsgId': AppMsgId,
'sub': 'del'
}
        ret = self._send_request(url, data)
if ret:
return True
else:
print ret
return False
def send_appmsg_by_id(self, sendTo, AppMsgId):
"""
        Send an article by its article id (delegates to _sendMsg, which is not defined in this file).
"""
ret = self._sendMsg(sendTo, {
'type': 10,
'app_id': AppMsgId,
'appmsgid': AppMsgId
})
return ret
def send_app_msg(self, sendTo, appMsgs, delete=True):
"""
        Proactively push articles to the given target.
"""
        AppMsgId = self.add_appmsg(appMsgs)
        if not AppMsgId:
            return False
        ret = self.send_appmsg_by_id(sendTo, AppMsgId)
        if delete:
            self.del_appmsg(AppMsgId)
return ret
def get_msg_Ids(self, msgType='news', begin=0, count=None, detail=False):
"""
        Fetch material ids; msgType is one of 'news', 'image', 'audio', 'video'.
"""
if msgType == 'news':
url = "https://mp.weixin.qq.com/cgi-bin/appmsg"
data = {'t': 'media/appmsg_list2',
'action': 'list_card',
'count': count or 10}
elif msgType == 'video':
url = "https://mp.weixin.qq.com/cgi-bin/appmsg"
data = {'t': 'media/appmsg_list',
'action': 'list',
'count': count or 9}
elif msgType == 'image':
url = "https://mp.weixin.qq.com/cgi-bin/filepage"
data = {'1': 1,
't': 'media/img_list',
'count': count or 12}
else:
url = "https://mp.weixin.qq.com/cgi-bin/filepage"
data = {'t': 'media/list',
'count': count or 21}
data.update({
'type': self.msgType[msgType],
'begin': begin,
})
ret = self._send_request(url, data, method="GET")
if ret:
if msgType in ['news', 'video']:
msgs = ret['app_msg_info']['item']
ids = [item['app_id'] for item in msgs]
else:
msgs = ret['page_info']['file_item']
ids = [item['file_id'] for item in msgs]
if detail:
return msgs
else:
return ids
else:
return False
if __name__ == "__main__":
client = WeChatController(user='weChatController',force=True)
msg = client.check_notice()
print msg
| apache-2.0 | -3,363,681,692,529,769,500 | 36.066225 | 140 | 0.510631 | false |
jaeilepp/eggie | mne/io/kit/kit.py | 2 | 28437 | """Conversion tool from SQD to FIF
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py
"""
# Author: Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import os
from os import SEEK_CUR
from struct import unpack
import time
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...coreg import (read_elp, fit_matched_points, _decimate_points,
get_ras_to_neuromag_trans)
from ...utils import verbose, logger
from ...transforms import apply_trans, als_ras_trans, als_ras_trans_mm
from ..base import _BaseRaw
from ..constants import FIFF
from ..meas_info import Info
from ..tag import _loc_to_trans
from .constants import KIT, KIT_NY, KIT_AD
from .coreg import read_hsp, read_mrk
from ...externals.six import string_types
class RawKIT(_BaseRaw):
"""Raw object from KIT SQD file adapted from bti/raw.py
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, verbose=None):
logger.info('Extracting SQD Parameters from %s...' % input_fname)
input_fname = os.path.abspath(input_fname)
self._sqd_params = get_sqd_params(input_fname)
self._sqd_params['stimthresh'] = stimthresh
self._sqd_params['fname'] = input_fname
logger.info('Creating Raw.info structure...')
# Raw attributes
self.verbose = verbose
self.preload = False
self._projector = None
self.first_samp = 0
self.last_samp = self._sqd_params['nsamples'] - 1
self.comp = None # no compensation for KIT
self.proj = False
# Create raw.info dict for raw fif object with SQD data
self.info = Info()
self.info['meas_id'] = None
self.info['file_id'] = None
self.info['meas_date'] = int(time.time())
self.info['projs'] = []
self.info['comps'] = []
self.info['lowpass'] = self._sqd_params['lowpass']
self.info['highpass'] = self._sqd_params['highpass']
self.info['sfreq'] = float(self._sqd_params['sfreq'])
# meg channels plus synthetic channel
self.info['nchan'] = self._sqd_params['nchan'] + 1
self.info['bads'] = []
self.info['acq_pars'], self.info['acq_stim'] = None, None
self.info['filename'] = None
self.info['ctf_head_t'] = None
self.info['dev_ctf_t'] = []
self._filenames = []
self.info['dig'] = None
self.info['dev_head_t'] = None
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, string_types)
else marker for marker in mrk]
mrk = np.mean(mrk, axis=0)
if (mrk is not None and elp is not None and hsp is not None):
self._set_dig_kit(mrk, elp, hsp)
elif (mrk is not None or elp is not None or hsp is not None):
err = ("mrk, elp and hsp need to be provided as a group (all or "
"none)")
raise ValueError(err)
# Creates a list of dicts of meg channels for raw.info
logger.info('Setting channel info structure...')
ch_names = {}
ch_names['MEG'] = ['MEG %03d' % ch for ch
in range(1, self._sqd_params['n_sens'] + 1)]
ch_names['MISC'] = ['MISC %03d' % ch for ch
in range(1, self._sqd_params['nmiscchan'] + 1)]
ch_names['STIM'] = ['STI 014']
locs = self._sqd_params['sensor_locs']
chan_locs = apply_trans(als_ras_trans, locs[:, :3])
chan_angles = locs[:, 3:]
self.info['chs'] = []
for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
chan_angles), 1):
ch_name, ch_loc, ch_angles = ch_info
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
chan_info['logno'] = idx
chan_info['scanno'] = idx
chan_info['range'] = KIT.RANGE
chan_info['unit_mul'] = KIT.UNIT_MUL
chan_info['ch_name'] = ch_name
chan_info['unit'] = FIFF.FIFF_UNIT_T
chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
if idx <= self._sqd_params['nmegchan']:
chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
chan_info['kind'] = FIFF.FIFFV_MEG_CH
else:
chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_REF_MAG
chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
chan_info['eeg_loc'] = None
# create three orthogonal vector
# ch_angles[0]: theta, ch_angles[1]: phi
ch_angles = np.radians(ch_angles)
x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
z = np.cos(ch_angles[0])
vec_z = np.array([x, y, z])
length = linalg.norm(vec_z)
vec_z /= length
vec_x = np.zeros(vec_z.size, dtype=np.float)
if vec_z[1] < vec_z[2]:
if vec_z[0] < vec_z[1]:
vec_x[0] = 1.0
else:
vec_x[1] = 1.0
elif vec_z[0] < vec_z[2]:
vec_x[0] = 1.0
else:
vec_x[2] = 1.0
vec_x -= np.sum(vec_x * vec_z) * vec_z
length = linalg.norm(vec_x)
vec_x /= length
vec_y = np.cross(vec_z, vec_x)
# transform to Neuromag like coordinate space
vecs = np.vstack((vec_x, vec_y, vec_z))
vecs = apply_trans(als_ras_trans, vecs)
chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
chan_info['coil_trans'] = _loc_to_trans(chan_info['loc'])
self.info['chs'].append(chan_info)
# label trigger and misc channels
for idy, ch_name in enumerate(ch_names['MISC'] + ch_names['STIM'],
self._sqd_params['n_sens']):
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
chan_info['logno'] = idy
chan_info['scanno'] = idy
chan_info['range'] = 1.0
chan_info['unit'] = FIFF.FIFF_UNIT_V
chan_info['unit_mul'] = 0 # default is 0 mne_manual p.273
chan_info['ch_name'] = ch_name
chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
chan_info['loc'] = np.zeros(12)
if ch_name.startswith('STI'):
chan_info['unit'] = FIFF.FIFF_UNIT_NONE
chan_info['kind'] = FIFF.FIFFV_STIM_CH
else:
chan_info['kind'] = FIFF.FIFFV_MISC_CH
self.info['chs'].append(chan_info)
self.info['ch_names'] = (ch_names['MEG'] + ch_names['MISC'] +
ch_names['STIM'])
self._set_stimchannels(stim, slope)
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % input_fname)
self._data, _ = self._read_segment()
assert len(self._data) == self.info['nchan']
# Create a synthetic channel
stim = self._sqd_params['stim']
trig_chs = self._data[stim, :]
if slope == '+':
trig_chs = trig_chs > stimthresh
elif slope == '-':
trig_chs = trig_chs < stimthresh
else:
raise ValueError("slope needs to be '+' or '-'")
trig_vals = np.array(2 ** np.arange(len(stim)), ndmin=2).T
trig_chs = trig_chs * trig_vals
stim_ch = trig_chs.sum(axis=0)
self._data[-1, :] = stim_ch
# Add time info
self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
self._times = np.arange(self.first_samp, self.last_samp + 1,
dtype=np.float64)
self._times /= self.info['sfreq']
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
% (self.first_samp, self.last_samp,
float(self.first_samp) / self.info['sfreq'],
float(self.last_samp) / self.info['sfreq']))
logger.info('Ready.')
def __repr__(self):
s = ('%r' % os.path.basename(self._sqd_params['fname']),
"n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
self.last_samp -
self.first_samp + 1))
return "<RawKIT | %s>" % ', '.join(s)
def read_stim_ch(self, buffer_size=1e5):
"""Read events from data
        Parameters
        ----------
buffer_size : int
The size of chunk to by which the data are scanned.
Returns
-------
events : array, [samples]
The event vector (1 x samples).
"""
buffer_size = int(buffer_size)
start = int(self.first_samp)
stop = int(self.last_samp + 1)
pick = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_ch = np.empty((1, stop), dtype=np.int)
for b_start in range(start, stop, buffer_size):
b_stop = b_start + buffer_size
x, _ = self._read_segment(start=b_start, stop=b_stop, sel=pick)
stim_ch[:, b_start:b_start + x.shape[1]] = x
return stim_ch
def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
projector=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
times : array, [samples]
returns the time values corresponding to the samples.
"""
if sel is None:
sel = list(range(self.info['nchan']))
elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
return (666, 666)
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
# Initial checks
start = int(start)
stop = int(stop)
if start >= stop:
raise ValueError('No data in this range')
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(start, stop - 1, start / float(self.info['sfreq']),
(stop - 1) / float(self.info['sfreq'])))
with open(self._sqd_params['fname'], 'rb', buffering=0) as fid:
# extract data
fid.seek(KIT.DATA_OFFSET)
# data offset info
data_offset = unpack('i', fid.read(KIT.INT))[0]
nchan = self._sqd_params['nchan']
buffer_size = stop - start
count = buffer_size * nchan
pointer = start * nchan * KIT.SHORT
fid.seek(data_offset + pointer)
data = np.fromfile(fid, dtype='h', count=count)
data = data.reshape((buffer_size, nchan))
# amplifier applies only to the sensor channels
n_sens = self._sqd_params['n_sens']
sensor_gain = np.copy(self._sqd_params['sensor_gain'])
sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
self._sqd_params['amp_gain'])
conv_factor = np.array((KIT.VOLTAGE_RANGE /
self._sqd_params['DYNAMIC_RANGE'])
* sensor_gain, ndmin=2)
data = conv_factor * data
data = data.T
# Create a synthetic channel
trig_chs = data[self._sqd_params['stim'], :]
if self._sqd_params['slope'] == '+':
trig_chs = trig_chs > self._sqd_params['stimthresh']
elif self._sqd_params['slope'] == '-':
trig_chs = trig_chs < self._sqd_params['stimthresh']
else:
raise ValueError("slope needs to be '+' or '-'")
trig_vals = np.array(2 ** np.arange(len(self._sqd_params['stim'])),
ndmin=2).T
trig_chs = trig_chs * trig_vals
stim_ch = np.array(trig_chs.sum(axis=0), ndmin=2)
data = np.vstack((data, stim_ch))
data = data[sel]
logger.info('[done]')
times = np.arange(start, stop) / self.info['sfreq']
return data, times
def _set_dig_kit(self, mrk, elp, hsp, auto_decimate=True):
"""Add landmark points and head shape data to the RawKIT instance
Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
ALS coordinate system.
Parameters
----------
mrk : None | str | array_like, shape = (5, 3)
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more
            than 10,000 points are in the head shape, they are automatically
decimated.
auto_decimate : bool
Decimate hsp points for head shape files with more than 10'000
points.
"""
if isinstance(hsp, string_types):
hsp = read_hsp(hsp)
n_pts = len(hsp)
if n_pts > KIT.DIG_POINTS:
hsp = _decimate_points(hsp, 5)
n_new = len(hsp)
msg = ("The selected head shape contained {n_in} points, which is "
"more than recommended ({n_rec}), and was automatically "
"downsampled to {n_new} points. The preferred way to "
"downsample is using FastScan.")
msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
logger.warning(msg)
if isinstance(elp, string_types):
elp_points = read_elp(elp)[:8]
            if len(elp_points) < 8:
err = ("File %r contains fewer than 8 points; got shape "
"%s." % (elp, elp_points.shape))
raise ValueError(err)
elp = elp_points
if isinstance(mrk, string_types):
mrk = read_mrk(mrk)
hsp = apply_trans(als_ras_trans_mm, hsp)
elp = apply_trans(als_ras_trans_mm, elp)
mrk = apply_trans(als_ras_trans, mrk)
nasion, lpa, rpa = elp[:3]
nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
elp = apply_trans(nmtrans, elp)
hsp = apply_trans(nmtrans, hsp)
# device head transform
trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
self._set_dig_neuromag(elp[:3], elp[3:], hsp, trans)
def _set_dig_neuromag(self, fid, elp, hsp, trans):
"""Fill in the digitizer data using points in neuromag space
Parameters
----------
fid : array, shape = (3, 3)
Digitizer fiducials.
elp : array, shape = (5, 3)
Digitizer ELP points.
hsp : array, shape = (n_points, 3)
Head shape points.
trans : None | array, shape = (4, 4)
Device head transformation.
"""
trans = np.asarray(trans)
if fid.shape != (3, 3):
raise ValueError("fid needs to be a 3 by 3 array")
if elp.shape != (5, 3):
raise ValueError("elp needs to be a 5 by 3 array")
if trans.shape != (4, 4):
raise ValueError("trans needs to be 4 by 4 array")
nasion, lpa, rpa = fid
dig = [{'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD},
{'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD},
{'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD}]
for idx, point in enumerate(elp):
dig.append({'r': point, 'ident': idx, 'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
for idx, point in enumerate(hsp):
dig.append({'r': point, 'ident': idx,
'kind': FIFF.FIFFV_POINT_EXTRA,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
dev_head_t = {'from': FIFF.FIFFV_COORD_DEVICE,
'to': FIFF.FIFFV_COORD_HEAD, 'trans': trans}
self.info['dig'] = dig
self.info['dev_head_t'] = dev_head_t
def _set_stimchannels(self, stim='<', slope='-'):
"""Specify how the trigger channel is synthesized form analog channels.
Has to be done before loading data. For a RawKIT instance that has been
created with preload=True, this method will raise a
NotImplementedError.
Parameters
----------
stim : list of int | '<' | '>'
Can be submitted as list of trigger channels.
If a list is not specified, the default triggers extracted from
misc channels will be used with specified directionality.
'<' means that largest values assigned to the first channel
in sequence.
'>' means the largest trigger assigned to the last channel
in sequence.
slope : '+' | '-'
'+' means a positive slope (low-to-high) on the event channel(s)
is used to trigger an event.
'-' means a negative slope (high-to-low) on the event channel(s)
is used to trigger an event.
"""
if self.preload:
err = "Can't change stim channel after preloading data"
raise NotImplementedError(err)
self._sqd_params['slope'] = slope
if isinstance(stim, str):
picks = pick_types(self.info, meg=False, ref_meg=False,
misc=True, exclude=[])[:8]
if stim == '<':
stim = picks[::-1]
elif stim == '>':
stim = picks
else:
raise ValueError("stim needs to be list of int, '>' or "
"'<', not %r" % str(stim))
elif np.max(stim) >= self._sqd_params['nchan']:
msg = ("Tried to set stim channel %i, but squid file only has %i"
" channels" % (np.max(stim), self._sqd_params['nchan']))
raise ValueError(msg)
self._sqd_params['stim'] = stim
def get_sqd_params(rawfile):
"""Extracts all the information from the sqd file.
Parameters
----------
rawfile : str
Raw sqd file to be read.
Returns
-------
sqd : dict
A dict containing all the sqd parameter settings.
"""
sqd = dict()
sqd['rawfile'] = rawfile
with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug
fid.seek(KIT.BASIC_INFO)
basic_offset = unpack('i', fid.read(KIT.INT))[0]
fid.seek(basic_offset)
# skips version, revision, sysid
fid.seek(KIT.INT * 3, SEEK_CUR)
# basic info
sysname = unpack('128s', fid.read(KIT.STRING))
sysname = sysname[0].decode().split('\n')[0]
fid.seek(KIT.STRING, SEEK_CUR) # skips modelname
sqd['nchan'] = unpack('i', fid.read(KIT.INT))[0]
if sysname == 'New York University Abu Dhabi':
KIT_SYS = KIT_AD
elif sysname == 'NYU 160ch System since Jan24 2009':
KIT_SYS = KIT_NY
else:
raise NotImplementedError
# channel locations
fid.seek(KIT_SYS.CHAN_LOC_OFFSET)
chan_offset = unpack('i', fid.read(KIT.INT))[0]
chan_size = unpack('i', fid.read(KIT.INT))[0]
fid.seek(chan_offset)
sensors = []
for i in range(KIT_SYS.N_SENS):
fid.seek(chan_offset + chan_size * i)
sens_type = unpack('i', fid.read(KIT.INT))[0]
if sens_type == 1:
# magnetometer
# x,y,z,theta,phi,coilsize
sensors.append(np.fromfile(fid, dtype='d', count=6))
elif sens_type == 2:
# axialgradiometer
# x,y,z,theta,phi,baseline,coilsize
sensors.append(np.fromfile(fid, dtype='d', count=7))
elif sens_type == 3:
# planargradiometer
# x,y,z,theta,phi,btheta,bphi,baseline,coilsize
sensors.append(np.fromfile(fid, dtype='d', count=9))
elif sens_type == 257:
# reference channels
sensors.append(np.zeros(7))
sqd['i'] = sens_type
sqd['sensor_locs'] = np.array(sensors)
# amplifier gain
fid.seek(KIT_SYS.AMPLIFIER_INFO)
amp_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
fid.seek(amp_offset)
amp_data = unpack('i', fid.read(KIT_SYS.INT))[0]
gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data)
>> KIT_SYS.GAIN1_BIT]
gain2 = KIT_SYS.GAINS[(KIT_SYS.GAIN2_MASK & amp_data)
>> KIT_SYS.GAIN2_BIT]
if KIT_SYS.GAIN3_BIT:
gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data)
>> KIT_SYS.GAIN3_BIT]
sqd['amp_gain'] = gain1 * gain2 * gain3
else:
sqd['amp_gain'] = gain1 * gain2
# filter settings
sqd['lowpass'] = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data)
>> KIT_SYS.LPF_BIT]
sqd['highpass'] = KIT_SYS.HPFS[(KIT_SYS.HPF_MASK & amp_data)
>> KIT_SYS.HPF_BIT]
sqd['notch'] = KIT_SYS.BEFS[(KIT_SYS.BEF_MASK & amp_data)
>> KIT_SYS.BEF_BIT]
# only sensor channels requires gain. the additional misc channels
# (trigger channels, audio and voice channels) are passed
# through unaffected
fid.seek(KIT_SYS.CHAN_SENS)
sens_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
fid.seek(sens_offset)
sens = np.fromfile(fid, dtype='d', count=sqd['nchan'] * 2)
sensitivities = (np.reshape(sens, (sqd['nchan'], 2))
[:KIT_SYS.N_SENS, 1])
sqd['sensor_gain'] = np.ones(KIT_SYS.NCHAN)
sqd['sensor_gain'][:KIT_SYS.N_SENS] = sensitivities
fid.seek(KIT_SYS.SAMPLE_INFO)
acqcond_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
fid.seek(acqcond_offset)
acq_type = unpack('i', fid.read(KIT_SYS.INT))[0]
if acq_type == 1:
sqd['sfreq'] = unpack('d', fid.read(KIT_SYS.DOUBLE))[0]
_ = fid.read(KIT_SYS.INT) # initialized estimate of samples
sqd['nsamples'] = unpack('i', fid.read(KIT_SYS.INT))[0]
else:
err = ("You are probably trying to load a file that is not a "
"continuous recording sqd file.")
raise ValueError(err)
sqd['n_sens'] = KIT_SYS.N_SENS
sqd['nmegchan'] = KIT_SYS.NMEGCHAN
sqd['nmiscchan'] = KIT_SYS.NMISCCHAN
sqd['DYNAMIC_RANGE'] = KIT_SYS.DYNAMIC_RANGE
return sqd
def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, verbose=None):
"""Reader function for KIT conversion to FIF
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
stim=stim, slope=slope, stimthresh=stimthresh,
preload=preload, verbose=verbose)
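# Illustrative usage sketch (added; not part of the original module). The file names
# below are placeholders for a real KIT .sqd recording and its marker, fiducial and
# head-shape files.
if __name__ == '__main__':
    raw = read_raw_kit('recording.sqd', mrk='marker.sqd', elp='points.elp',
                       hsp='headshape.hsp', stim='>', slope='-', preload=False)
    print(raw)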
| bsd-2-clause | 406,977,401,301,382,140 | 40.574561 | 79 | 0.540001 | false |
JackWalpole/finite_strain | ellipsoid_visualisation.py | 1 | 1324 | #!/usr/bin/env python
"""Visualise finite strian ellipsoids"""
import numpy as np
from mayavi.api import Engine
from mayavi.sources.api import ParametricSurface
from mayavi.modules.api import Surface
from mayavi import mlab
def gen_ellipsoid(position,shape,orientation):
"""given the existence of a scene generate ellipsoid"""
surface = Surface()
source.add_module(surface)
actor = surface.actor
actor.property.opacity = 0.5
actor.property.color = tuple(np.random.rand(3))
actor.mapper.scalar_visibility = False
actor.property.backface_culling = True
actor.actor.orientation = orientation
actor.actor.origin = np.zeros(3)
actor.actor.position = position
actor.actor.scale = shape
return surface
engine = Engine()
engine.start()
scene = engine.new_scene()
# scene.scene.disable_render = True
source = ParametricSurface()
source.function = 'ellipsoid'
engine.add_source(source)
# start with a sphere
surface = gen_ellipsoid(np.zeros(3),np.ones(3),np.zeros(3))
for ii in range(100):
print ii
surface.actor.actor.set(scale = [1 + ii*.2,1,1])
# surfaces = []
# for ii in range(10):
# surfaces.append(gen_ellipsoid(np.random.rand(3),np.random.rand(3),np.random.rand(3)*360))
# scene.scene.disable_render = False
# mlab.show() | mit | -7,504,901,609,433,819,000 | 23.090909 | 95 | 0.699396 | false |
fabiocorneti/django-easytree | easytree/forms.py | 1 | 4661 | from django import forms
from django.utils.translation import ugettext_lazy as _
from easytree import utils
from easytree.exceptions import EasyTreeException
pos_map = {
'first-sibling': _('First sibling'),
'left': _('Previous sibling'),
'right': _('Next sibling'),
'last-sibling': _('Last sibling'),
'sorted-sibling': _('Sorted sibling'),
'first-child': _('First child'),
'last-child': _('Last child'),
'sorted-child': _('Sorted child')
}
class EasyTreeModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return u'%s %s' % (
u'>>>' * ((obj.depth or 1) -1),
super(EasyTreeModelChoiceField, self).label_from_instance(obj)
)
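# Added note (commentary, not in the original module): label_from_instance() above
# prefixes each label with '>>>' per tree level, so a node at depth 3 renders roughly
# as ">>>>>> <node label>" in the relative_to dropdown.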
class BaseEasyTreeForm(forms.ModelForm):
toplevel_model_cache = None
def get_toplevel_model(self):
if not self.toplevel_model_cache:
self.toplevel_model_cache = utils.get_toplevel_model(self._meta.model)
return self.toplevel_model_cache
toplevel_model = property(get_toplevel_model)
def __init__(self, *args, **kwargs):
super(BaseEasyTreeForm, self).__init__(*args, **kwargs)
raw_relative_to = getattr(self.instance._easytree_meta, 'raw_relative_to', False)
choice_field_kwargs = {
'queryset': self.toplevel_model.objects.order_by('tree_id', 'lft'),
'required': False,
'label': _("Relative to %(modelname)s") % {'modelname': self.toplevel_model._meta.verbose_name}
}
if raw_relative_to:
choice_field_kwargs['widget'] = forms.TextInput
self.fields['relative_to'] = EasyTreeModelChoiceField(**choice_field_kwargs)
max_depth = getattr(self.instance._easytree_meta, 'max_depth', 0)
if max_depth == 1:
relative_positions_choices = ('left', 'right', 'first-sibling', 'last-sibling')
elif getattr(self.instance, 'node_order_by', None):
relative_positions_choices = ('sorted-sibling', 'sorted-child')
else:
relative_positions_choices = [k for k in pos_map.keys() if k not in ('sorted-sibling', 'sorted-child')]
self.fields['relative_position'] = forms.ChoiceField(
required=False,
choices=[('','-------')] + [(k, v) for k, v in pos_map.items() if k in relative_positions_choices],
label=_("Relative position")
)
def clean(self):
cleaned_data = self.cleaned_data
model = self.toplevel_model
relative_to = cleaned_data.get('relative_to')
relative_position = cleaned_data.get('relative_position')
if not self.instance.pk:
if not relative_to:
try:
model.objects.validate_root(None, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
else:
if relative_position in ('last-child', 'first-child', 'sorted-child'):
try:
model.objects.validate_child(None, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
else:
try:
model.objects.validate_sibling(None, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
else:
if relative_to:
try:
model.objects.validate_move(self.instance, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
cleaned_data['relative_to'] = relative_to
return cleaned_data
def save(self, **kwargs):
instance = super(BaseEasyTreeForm, self).save(commit=False)
relative_to = self.cleaned_data.get('relative_to', None)
relative_position = self.cleaned_data.get('relative_position')
if relative_to:
instance.easytree_relative_position = relative_position
instance.easytree_relative_to = relative_to
if kwargs.get('commit', False):
instance.save()
return instance
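# Illustrative usage sketch (added; not part of the original module). ``Category`` is a
# hypothetical model registered with easytree, shown only to indicate how the form is
# normally subclassed:
#
#     class CategoryForm(BaseEasyTreeForm):
#         class Meta:
#             model = Category
#
# The resulting form exposes ``relative_to`` and ``relative_position`` fields whose
# values are validated against the tree manager before the node is created or moved.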
| bsd-3-clause | -2,380,818,350,016,450,000 | 39.181034 | 125 | 0.566831 | false |
meithan/NFL_website | cgi-bin/QueryPage.py | 1 | 4651 | #!/usr/bin/env python
# Convenience script to issue MySQL queries to the DB
import cgi, cgitb
import sys
cgitb.enable()
from common import *
# =====================================
def showBadLogin():
print '<br><strong>You must have the proper credentials to access this page!</strong>'
print '</body></html>'
# =====================================
def showGoodPage():
# Get query string, if present
formdata = cgi.FieldStorage()
if formdata.has_key("Query"):
queryStr = formdata["Query"].value
else:
queryStr = ""
responseStr = ""
# =======================================
# Send query to DB, obtain response
if queryStr != "":
# Import MySQLdb module
library_loaded = False
responseStr += "Loading MySQLdb ..."
try:
sys.path.append('/home/meithanx/mysql')
import MySQLdb
library_loaded = True
except:
responseStr += "\nCouldn't load MySQLdb!\n"
if library_loaded:
responseStr += " Loaded.\n"
# Connect to DB
connected = False
responseStr += "Establishing connection to DB %s ..." % (DB_NAME)
try:
db = MySQLdb.connect("localhost","meithanx_sql","foobar",DB_NAME)
dbcursor = db.cursor()
connected = True
except:
responseStr += "Couldn't connect to DB %s!!\n" % (DB_NAME)
if connected:
responseStr += " Connected.\n"
responseStr += "===============================\n\n"
responseStr += "> %s\n\n" % (queryStr)
query = queryStr.strip()
dbcursor.execute(query)
db.commit()
rows_affected = dbcursor.rowcount
rowset = dbcursor.fetchall()
if len(rowset)==0:
responseStr += repr(rowset) + "\n"
for row in rowset:
responseStr += repr(row) + "\n"
responseStr += "%i rows processed.\n" % (rows_affected)
# =======================================
print '<form method="GET">'
print '<div style="width: 800px; margin:0 auto;">'
print '<br>'
print 'Query:<br>'
print '<textarea id="QueryField" name="Query" cols="40" rows="5" style="width: 800px;">%s</textarea>' % (queryStr)
print '<br>'
print '<input type="submit" value="Submit"> '
print '<input type="button" onClick="clearQueryField()" value="Clear">'
print ' Queries: <input type="button" onClick="enterSelect()" value="SELECT">'
print ' <input type="button" onClick="enterUpdate()" value="UPDATE">'
print ' <input type="button" onClick="enterInsert()" value="INSERT">'
print ' <input type="button" onClick="enterDelete()" value="DELETE">'
print ' <input type="button" onClick="enterDescribe()" value="DESCRIBE">'
print '<br>'
print '<hr>'
print '</form>'
print 'Response:<br>'
print '<textarea readonly id="Response" cols="40" rows="40" style="width: 800px;">%s</textarea>' % (responseStr)
print '<div>'
print '</body></html>'
# =====================================
print "Content-type:text/html"
print # THIS BLANK LINE IS MANDATORY
print '<!DOCTYPE html>'
print '<html lang="en">'
print '<head>'
print '<script language="JavaScript">'
print 'function clearQueryField() {'
print ' document.getElementById("QueryField").value="";'
print '}'
print 'function enterSelect() {'
print ' document.getElementById("QueryField").value="SELECT * FROM table_name WHERE condition;";'
print '}'
print 'function enterUpdate() {'
print ' document.getElementById("QueryField").value="UPDATE table_name SET field=value WHERE condition;";'
print '}'
print 'function enterInsert() {'
print ' document.getElementById("QueryField").value="INSERT INTO table_name VALUES (value1,value2);";'
print '}'
print 'function enterDelete() {'
print ' document.getElementById("QueryField").value="DELETE FROM table_name WHERE condition;";'
print '}'
print 'function enterDescribe() {'
print ' document.getElementById("QueryField").value="DESCRIBE table_name;";'
print '}'
print '</script></head><body>'
# Determine logged user from cookie, if any
logged_user = authenticateUser()
### HACK!! USER AUTHENTICATION BYPASSED
print "<h3>WARNING: user authentication temporarily overriden! Don\'t forget to re-protect this page!</h3>"
showGoodPage()
#if logged_user != None and logged_user.Username == "Meithan":
# showGoodPage()
#else:
# showBadLogin() | gpl-3.0 | -4,550,998,764,767,151,000 | 31.760563 | 118 | 0.57622 | false |
traxex33/Twitter-Analysis | junk/mineTweets.py | 1 | 2336 | import tweepy
import json
from tweepy import OAuthHandler
from tweepy import Stream
from liveListener import Listener
class TweetMiner:
def __init__(self, config_fname='config'):
self._readdetails(config_fname)
self._authenticate()
def mine(self):
self.state = None
        while self.state not in ('1', '2'):
print ("Press 1 to calculate popularity of a phrase. Press 2 to analyze a user profile.")
self.state = str(raw_input())
if self.state == '1' or self.state == '2':
break
print ("Enter a valid choice")
# Call functions
if self.state == '1':
return self.state, self.trackLiveTweets()
elif self.state == '2':
return self.state, self.getUserTweets()
# Tracking live tweets for popularity calculation
def trackLiveTweets(self):
print ("Enter a key word to track for 5 minutes. Be as specific as possible")
self.file = 'tweets.json'
self.trackWord = str(raw_input())
self.twitter_stream = Stream(self.auth, Listener(self.file))
self.twitter_stream.filter(track=[self.trackWord])
return self.file
# Getting tweets from user profile for analysis
def getUserTweets(self):
print ("Enter the user <screen_name> to track. For example, '@user' without the quotes.")
self.screenName = str(raw_input())
self.file = self.screenName + "_tweets.json"
open(self.file, 'w').close()
for status in tweepy.Cursor(self.api.user_timeline, screen_name=self.screenName).items(200):
with open(self.file, "a") as f:
json.dump(dict(status._json), f)
f.write('\n')
return self.file
def _readdetails(self, config_fname):
with open(config_fname, 'r') as f:
self.consumer_key = f.readline().replace('\n', '')
self.consumer_secret = f.readline().replace('\n', '')
self.access_token = f.readline().replace('\n', '')
self.access_secret = f.readline().replace('\n', '')
def _authenticate(self):
self.auth = OAuthHandler(self.consumer_key, self.consumer_secret)
self.auth.set_access_token(self.access_token, self.access_secret)
self.api = tweepy.API(self.auth)
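# Illustrative usage sketch (added; not part of the original script). It assumes a
# local 'config' file holding the consumer key/secret and access token/secret, one
# per line, exactly as _readdetails() expects.
if __name__ == '__main__':
    miner = TweetMiner('config')
    choice, fname = miner.mine()
    print ("Option %s wrote tweets to %s" % (choice, fname))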
| mit | -6,225,401,496,223,295,000 | 39.275862 | 101 | 0.606592 | false |
frRoy/Benchmarklib | benchmarklib/charts/tests/test_views.py | 1 | 1353 | from test_plus.test import TestCase
from django.test import Client
# from unittest import skip
class Testnew_chart(TestCase):
def setUp(self):
self.client = Client()
# self.client.login(username='fred', password='secret')
def test_charts_renders_new_charts(self):
response = self.client.get('/charts/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'charts/new_chart.html')
def test_new_charts_can_pass_session_content_after_POST(self):
str = '1,2\r\n3,4\r\n5,6'
response = self.client.post('/charts/', {'data': str})
self.assertEqual(response.status_code, 302)
self.assertEqual(self.client.session['content'], str)
def test_new_charts_redirects_after_POST(self):
response = self.client.post('/charts/', {'data': '1,2'}, follow=True)
last_url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, 200)
self.assertEqual(last_url, '/charts/preview/')
self.assertTemplateUsed(response, 'charts/preview_chart.html')
def test_new_charts_contains_Preview_Chart(self):
response = self.client.get('/charts/')
self.assertContains(response, "Preview Chart")
class Testpreview_chart(TestCase):
def setUp(self):
self.client = Client()
| bsd-3-clause | 2,509,228,727,055,043,600 | 34.605263 | 77 | 0.66371 | false |
pmav99/praktoras | utils/service_discovery/config_stores.py | 1 | 2131 | # (C) Fractal Industries, Inc. 2016
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from utils.service_discovery.abstract_config_store import AbstractConfigStore
from utils.service_discovery.abstract_config_store import CONFIG_FROM_AUTOCONF, CONFIG_FROM_FILE, CONFIG_FROM_TEMPLATE, TRACE_CONFIG # noqa imported somewhere else
from utils.service_discovery.etcd_config_store import EtcdStore
from utils.service_discovery.consul_config_store import ConsulStore
SD_CONFIG_BACKENDS = ['etcd', 'consul'] # noqa: used somewhere else
SD_TEMPLATE_DIR = '/conmon/check_configs'
def get_config_store(agentConfig):
if agentConfig.get('sd_config_backend') == 'etcd':
return EtcdStore(agentConfig)
elif agentConfig.get('sd_config_backend') == 'consul':
return ConsulStore(agentConfig)
elif agentConfig.get('sd_config_backend') is None:
return StubStore(agentConfig)
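# Illustrative example (added; not part of the original module): a minimal agentConfig
# dict as get_config_store() expects it; the host and port values are placeholders.
#
#     config = {'sd_config_backend': 'etcd',
#               'sd_template_dir': SD_TEMPLATE_DIR,
#               'sd_backend_host': '127.0.0.1',
#               'sd_backend_port': '2379'}
#     store = get_config_store(config)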
def extract_sd_config(config):
"""Extract configuration about service discovery for the agent"""
sd_config = {}
if config.has_option('Main', 'sd_config_backend'):
sd_config['sd_config_backend'] = config.get('Main', 'sd_config_backend')
else:
sd_config['sd_config_backend'] = None
if config.has_option('Main', 'sd_template_dir'):
sd_config['sd_template_dir'] = config.get(
'Main', 'sd_template_dir')
else:
sd_config['sd_template_dir'] = SD_TEMPLATE_DIR
if config.has_option('Main', 'sd_backend_host'):
sd_config['sd_backend_host'] = config.get(
'Main', 'sd_backend_host')
if config.has_option('Main', 'sd_backend_port'):
sd_config['sd_backend_port'] = config.get(
'Main', 'sd_backend_port')
return sd_config
class StubStore(AbstractConfigStore):
"""Used when no valid config store was found. Allow to use auto_config."""
def _extract_settings(self, config):
pass
def get_client(self):
pass
def crawl_config_template(self):
# There is no user provided templates in auto_config mode
return False
| bsd-3-clause | -2,290,798,264,232,469,200 | 35.741379 | 164 | 0.679493 | false |
henriquegemignani/randovania | randovania/games/prime/patcher_file_lib/hint_formatters.py | 1 | 3971 | import typing
from randovania.game_description import node_search
from randovania.game_description.area import Area
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.hint import Hint, HintLocationPrecision, RelativeDataArea, HintRelativeAreaName
from randovania.game_description.node import PickupNode
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.world_list import WorldList
from randovania.games.prime.patcher_file_lib import hint_lib
class LocationFormatter:
def format(self, determiner: hint_lib.Determiner, pickup_name: str, hint: Hint) -> str:
raise NotImplementedError()
class GuardianFormatter(LocationFormatter):
_GUARDIAN_NAMES = {
PickupIndex(43): "Amorbis",
PickupIndex(79): "Chykka",
PickupIndex(115): "Quadraxis",
}
def format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint) -> str:
guardian = hint_lib.color_text(hint_lib.TextColor.GUARDIAN, self._GUARDIAN_NAMES[hint.target])
return f"{guardian} is guarding {determiner}{pickup}."
class TemplatedFormatter(LocationFormatter):
def __init__(self, template: str, area_namer: hint_lib.AreaNamer):
self.template = template
self.hint_name_creator = area_namer
def format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint) -> str:
node_name = self.hint_name_creator.location_name(
hint.target,
hint.precision.location == HintLocationPrecision.WORLD_ONLY
)
return self.template.format(determiner=determiner,
pickup=pickup,
node=node_name)
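# Added note (illustrative, not from the original file): a TemplatedFormatter built with
# template="{determiner}{pickup} can be found in {node}." and an AreaNamer would render
# hints such as "The Missile Launcher can be found in Temple Grounds."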
class RelativeFormatter(LocationFormatter):
def __init__(self, world_list: WorldList, patches: GamePatches):
self.world_list = world_list
self.patches = patches
self._index_to_node = {
node.pickup_index: node
for node in world_list.all_nodes
if isinstance(node, PickupNode)
}
def _calculate_distance(self, source_location: PickupIndex, target: Area) -> int:
source = self._index_to_node[source_location]
return node_search.distances_to_node(self.world_list, source,
patches=self.patches, ignore_elevators=False)[target]
def relative_format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint, other_area: Area, other_name: str,
) -> str:
distance = self._calculate_distance(hint.target, other_area) + (hint.precision.relative.distance_offset or 0)
if distance == 1:
distance_msg = "one room"
else:
precise_msg = "exactly " if hint.precision.relative.distance_offset is None else "up to "
distance_msg = f"{precise_msg}{distance} rooms"
return (f"{determiner.title}{pickup} can be found "
f"{hint_lib.color_text(hint_lib.TextColor.LOCATION, distance_msg)} away from {other_name}.")
def format(self, determiner: hint_lib.Determiner, pickup_name: str, hint: Hint) -> str:
raise NotImplementedError()
class RelativeAreaFormatter(RelativeFormatter):
def format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint) -> str:
relative = typing.cast(RelativeDataArea, hint.precision.relative)
other_area = self.world_list.area_by_area_location(relative.area_location)
if relative.precision == HintRelativeAreaName.NAME:
other_name = self.world_list.area_name(other_area)
elif relative.precision == HintRelativeAreaName.FEATURE:
raise NotImplementedError("HintRelativeAreaName.FEATURE not implemented")
else:
raise ValueError(f"Unknown precision: {relative.precision}")
return self.relative_format(determiner, pickup, hint, other_area, other_name)
| gpl-3.0 | -1,242,443,017,889,526,800 | 44.125 | 122 | 0.674893 | false |
digibyte/digibyte | test/functional/p2p_fingerprint.py | 1 | 5905 | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(DigiByteTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(2, block_hash)) # 2 == "Block"
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Check whether last block received from node has a given hash
def last_block_equals(self, expected_hash, node):
block_msg = node.last_message.get("block")
return block_msg and block_msg.block.rehash() == expected_hash
# Check whether last block header received from node has a given hash
def last_header_equals(self, expected_hash, node):
headers_msg = node.last_message.get("headers")
return (headers_msg and
headers_msg.headers and
headers_msg.headers[0].rehash() == expected_hash)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generate(nblocks=10)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata()
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
tip = self.nodes[0].generate(nblocks=1)[0]
assert_equal(self.nodes[0].getblockcount(), 14)
# Send getdata & getheaders to refresh last received getheader message
block_hash = int(tip, 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
# Request for very old stale block should now fail
self.send_block_request(stale_hash, node0)
time.sleep(3)
assert not self.last_block_equals(stale_hash, node0)
# Request for very old stale block header should now fail
self.send_header_request(stale_hash, node0)
time.sleep(3)
assert not self.last_header_equals(stale_hash, node0)
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
wait_until(test_function, timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
| mit | 5,094,438,436,305,804,000 | 37.848684 | 79 | 0.659949 | false |
vlimant/IntelROCCS | Monitor/plotFromPickle.py | 1 | 21846 | #!/usr/bin/python
'''==============================================================================
This script reads information from the pickle caches and directly
makes plots
=============================================================================='''
import os, sys
import re, glob, subprocess, time
from findDatasetHistoryAll import *
import findDatasetProperties as fDP
import cPickle as pickle
import ROOT
from array import array
from Dataset import *
from operator import itemgetter
genesis=1378008000
nowish = time.time()
loadedStyle=False
rc=None
try:
monitorDB = os.environ['MONITOR_DB']
except KeyError:
    sys.stderr.write('\n ERROR - the MONITOR_DB environment variable is not defined\n\n')
sys.exit(2)
'''==============================================================================
H E L P E R S
=============================================================================='''
def addData(nAllAccessed,nAccessed,debug=0):
# adding a hash array (nAccessed) to the mother of all hash arrays (nAllAccessed)
# loop through the hash array
for key in nAccessed:
# add the entries to our all access hash array
if key in nAllAccessed:
nAllAccessed[key] += nAccessed[key]
else:
nAllAccessed[key] = nAccessed[key]
# return the updated all hash array
return nAllAccessed
def addSites(nSites,nAccessed,debug=0):
# adding up the number of sites for each dataset
# loop through the hash array
for key in nAccessed:
# add the entries to our all access hash array
if key in nSites:
nSites[key] += 1
else:
nSites[key] = 1
# return the updated all hash array
return nSites
def convertSizeToGb(sizeTxt):
# first make sure string has proper basic format
if len(sizeTxt) < 3:
print ' ERROR - string for sample size (%s) not compliant. EXIT.'%(sizeTxt)
sys.exit(1)
# this is the text including the size units, that need to be converted
sizeGb = float(sizeTxt[0:-2])
units = sizeTxt[-2:]
# decide what to do for the given unit
if units == 'MB':
sizeGb = sizeGb/1000.
elif units == 'GB':
pass
elif units == 'TB':
sizeGb = sizeGb*1000.
else:
print ' ERROR - Could not identify size. EXIT!'
sys.exit(1)
# return the size in GB as a float
return sizeGb
def calculateAverageNumberOfSites(sitePattern,datasetSet,fullStart,end,datasetPattern):
# calculate the average number of replicas (sites) for a dataset in a given time interval
# print ' Relevant time interval: %s %s --> %d'%(time.strftime("%Y-%m-%d",time.gmtime(fullStart))\
# ,time.strftime("%Y-%m-%d",time.gmtime(end)),end-fullStart)
print ' Relevant time interval: %s %s --> %d'%(fullStart,end,end-fullStart)
# convert it into floats and take care of possible rounding issues
fullInterval = end - fullStart
predictedDatasetsOnSites={}
for datasetName in datasetSet:
predictedDatasetsOnSites[datasetName]=set([])
nSites = {}
timeOnSites = {} #timeOnSites[dataset][site] = timef
# match the intervals from the phedex history to the requested time interval
#===========================================================================
for datasetName,datasetObject in datasetSet.iteritems():
verb = (datasetName=='/GluGluZH_HToWW_M120_13TeV_powheg_pythia8/RunIIFall15MiniAODv1-PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/MINIAODSIM')
if not re.match(datasetPattern,datasetName):
continue
# don't penalize a dataset for not existing
cTime = datasetObject.cTime
#start = max(fullStart,cTime)
start = fullStart
interval = end - start
if verb:
print fullStart,end,start,cTime,interval
if not datasetName in nSites:
nSites[datasetName] = 0
timeOnSites[datasetName] = {}
for siteName,movement in datasetObject.movement.iteritems():
if not re.match(sitePattern,siteName): # only requested sites
continue
if not siteName in timeOnSites[datasetName]:
timeOnSites[datasetName][siteName] = 0
xfers = movement[0]
dels = movement[1]
if verb:
print siteName
print '\t',xfers
print '\t',dels
lenXfer = len(xfers)
lenDel = len(dels)
if lenDel == lenXfer - 1:
dels.append(nowish)
# find this site's fraction for nSites
if not datasetName in nSites:
nSites[datasetName] = 0
siteSum = 0
i = 0
while i < lenXfer:
try:
tXfer = xfers[i]
tDel = dels[i]
except IndexError:
break
i = i + 1 # iterate before all continue statements
# four ways to be in interval
# (though this prevents you from having the same
# start and end date)
if tXfer <= start <= end <= tDel:
siteSum += 1 # should happen at most once (?)
elif tXfer <= start < tDel < end:
siteSum += float(tDel - start)/float(interval)
elif start < tXfer < end <= tDel:
siteSum += float(end - tXfer)/float(interval)
elif start < tXfer < tDel <= end:
siteSum += float(tDel - tXfer)/float(interval)
else: # have ensured tXfer > tDel
continue
if verb:
print '\t',siteSum
if siteSum>0:
timeOnSites[datasetName][siteName] += siteSum
nSites[datasetName] += siteSum
n = 0
nSkip = 0
Sum = float(0)
for datasetName in nSites:
if nSites[datasetName] == 0: # dataset not on sites in interval
nSkip += 1
continue
Sum += nSites[datasetName]
n += 1
return nSites,timeOnSites
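# Illustrative example of the proration done in calculateAverageNumberOfSites
# (hypothetical numbers): with start=0 and end=100, a replica that lives on a
# site from tXfer=25 to tDel=75 overlaps half of the window, so it contributes
# 0.5 to nSites[datasetName] and to timeOnSites[datasetName][siteName].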
def makeActualPlots(sitePattern,start,end,jarFile,crbLabel='',rootFile='',makeSummaryPlots=False):
if crbLabel!='' and rootFile=='':
sys.stderr.write('ERROR [plotFromPickle.makeActualPlots]: If crbLabel is defined, rootFile must be defined')
return
groupPattern = os.environ['MONITOR_GROUP']
datasetPattern = os.environ['MONITOR_PATTERN']
datasetPattern = datasetPattern.replace("_",".*")
groupPattern = groupPattern.replace("_",".*")
interval = float(end - start)/float(86400*30) # time interval in months, used for plotting
sitePattern=re.sub("\*",".*",sitePattern) # to deal with stuff like T2* --> T2.*
print "\n = = = = S T A R T A N A L Y S I S = = = =\n"
print " Dataset pattern: %s"%(datasetPattern)
print " Group pattern: %s"%(groupPattern)
print " Site pattern: %s"%(sitePattern)
pickleJar = None
if type(jarFile)==type(''):
pickleJar = open(jarFile,"rb")
pickleDict = pickle.load(pickleJar)
else:
pickleDict = jarFile
datasetSet = pickleDict["datasetSet"]
nSiteAccess = pickleDict["nSiteAccess"]
# last step: produce plots!
global loadedStyle,rc
if not loadedStyle:
stylePath = os.environ.get("MIT_ROOT_STYLE")
print stylePath
rc = ROOT.gROOT.LoadMacro(stylePath) # figure out using so later
if rc:
print "Warning, MitRootStyle could not be loaded from %s"%(stylePath)
else:
ROOT.MitRootStyle.Init()
loadedStyle=True
print 'MAKING SUMMARY:',makeSummaryPlots
if makeSummaryPlots:
c11 = ROOT.TCanvas("c11","c11",800,800)
nTiers=7
hVolumeFrac = ROOT.TH1F("hVolumeFrac","hVolumeFrac",nTiers,-0.5,nTiers-.5)
hUsageFrac = ROOT.TH1F("hUsageFrac","hUsageFrac",nTiers,-0.5,nTiers-.5)
tiers = {'AODSIM':0, 'AOD':1, 'MINIAODSIM':2,'MINIAOD':3,'GEN-SIM-RAW':4,'GEN-SIM-RECO':5,'Other':6}
for hist in [hUsageFrac,hVolumeFrac]:
xaxis = hist.GetXaxis()
for tier,nBin in tiers.iteritems():
xaxis.SetBinLabel(nBin+1,tier)
totalVolume=0
totalUsage=0
siteAccessDict = {}
miniaodSizeNoRepl=0
miniaodSizeRepl=0
for datasetName,datasetObject in datasetSet.iteritems():
tier = datasetName.split('/')[-1]
datasetVolume = max(0,len(datasetObject.currentSites)*datasetObject.sizeGB)
if tier.find('MINIAOD')>=0 and len(datasetObject.currentSites)>0:
# print datasetName,datasetObject.sizeGB
miniaodSizeNoRepl += datasetObject.sizeGB
miniaodSizeRepl += datasetVolume
datasetUsage = 0
for s,a in datasetObject.nAccesses.iteritems():
if not re.match(sitePattern,s):
continue
if s not in siteAccessDict:
siteAccessDict[s] = [0,0]
for t,n in a.iteritems():
if (nowish-t)<(86400*30):
datasetUsage+=n
totalVolume += datasetVolume
totalUsage += datasetUsage
if tier not in tiers:
tier = 'Other'
if tier in tiers:
val = tiers[tier]
hVolumeFrac.Fill(val,datasetVolume)
hUsageFrac.Fill(val,datasetUsage)
hVolumeFrac.Scale(1./totalVolume)
hUsageFrac.Scale(1./totalUsage)
for hist in [hUsageFrac,hVolumeFrac]:
ROOT.MitRootStyle.InitHist(hist,"","",1)
hVolumeFrac.GetYaxis().SetTitle('current volume fraction')
hUsageFrac.GetYaxis().SetTitle('usage fraction (30 days)')
# hUsageFrac.SetNormFactor()
for hist in [hUsageFrac,hVolumeFrac]:
hist.SetFillColor(8)
hist.SetLineColor(8)
hist.SetFillStyle(1001)
hist.SetMinimum(0.)
hist.SetTitle('')
c11.Clear()
c11.cd()
c11.SetBottomMargin(.2)
c11.SetRightMargin(.2)
hist.Draw("hist")
if hist==hVolumeFrac:
c11.SaveAs(monitorDB+'/FractionVolume_%s.png'%(groupPattern))
else:
c11.SaveAs(monitorDB+'/FractionUsage_%s.png'%(groupPattern))
print "no replication ",miniaodSizeNoRepl
print "with replication",miniaodSizeRepl
c21 = ROOT.TCanvas("c21","c21",1000,600)
for h in [hVolumeFrac,hUsageFrac]:
h.Delete()
return
print "Computing average number of sites"
nAverageSites,timeOnSites = calculateAverageNumberOfSites(sitePattern,datasetSet,start,end,datasetPattern)
'''==============================================================================
our usage plots
=============================================================================='''
cUsage = ROOT.TCanvas("c1","c1",800,800)
maxBin = 8
nBins = 60.
l = []
low = 0
i = 0
while low < maxBin:
l.append(low)
low += (maxBin/nBins) * (1.1)**(i)
i += 1
l.append(maxBin)
hUsage = ROOT.TH1F("dataUsage","Data Usage",len(l)-1,array('f',l))
# delta = float(maxBin)/(2*(nBins-1)) # so that bins are centered
# hUsage = ROOT.TH1F("dataUsage","Data Usage",nBins,-delta,maxBin+delta)
kBlack = 1
if not rc:
ROOT.MitRootStyle.InitHist(hUsage,"","",kBlack)
titles = "; Accesses/month; Fraction of total data volume"
hUsage.SetTitle(titles)
meanVal = 0.
sumWeight = 0.
nEntries = 0
totalSize = 0
for datasetName,datasetObject in datasetSet.iteritems():
if not re.match(datasetPattern,datasetName):
# print "did not match pattern"
continue
if datasetObject.nFiles==0:
            # skip datasets with no files
continue
nSitesAv = nAverageSites[datasetName]
if nSitesAv == 0:
# it was nowhere
continue
nAccess = 0
for siteName in datasetObject.nAccesses:
if not re.match(sitePattern,siteName):
# maybe we should get rid of this functionality to speed things up
continue
for utime,n in datasetObject.nAccesses[siteName].iteritems():
if utime >= start and utime <= end:
nAccess += n
value = float(nAccess)/float(datasetObject.nFiles*nSitesAv*interval)
weight = float(nSitesAv) * float(datasetObject.sizeGB)/1000.
totalSize += weight
meanVal += value
sumWeight += weight
nEntries += 1
hUsage.Fill(min(maxBin,value),weight)
#totalSize = hUsage.Integral()
print "Found %i datasets, corresponding to an average volume of %3f PB"%(nEntries,float(totalSize)/1000.)
if (sumWeight==0):
        sumWeight = 1
        totalSize = 1
meanVal = meanVal/sumWeight
hUsage.Scale(1./float(totalSize))
maxy = hUsage.GetMaximum()
hUsage.SetMaximum(maxy*10.)
ROOT.gPad.SetLogy(1) # big zero bins
cUsage.cd()
try:
histColor = os.environ['MONITOR_COLOR']
hUsage.SetLineColor(histColor)
except KeyError:
pass
hUsage.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
ROOT.MitRootStyle.AddText("Overflow added to last bin.")
if groupPattern == ".*":
groupPattern = "All"
integralTexts = []
integralTexts.append( "Group: %s"%(groupPattern) )
integralTexts.append( "Period: [%s, %s]\n"%( strftime("%Y-%m-%d",gmtime(start)) , strftime("%Y-%m-%d",gmtime(end)) ) )
integralTexts.append( "Average data managed: %.3f PB\n"%(totalSize/1000.) )
# integralTexts.append( "Mean: %.3f accesses/month\n"%( meanVal ) )
integralTexts.append( "Mean: %.3f accesses/month\n"%(hUsage.GetMean()) )
positions = [0.85,0.8, 0.75, 0.7]
plotTText = [None,None,None,None]
for i in range(4):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
try:
cUsage.SaveAs(monitorDB+"/Usage_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cUsage.SaveAs(monitorDB+"/Usage_%s_%i_%i.png"%(groupPattern,start,end))
# houtFile = ROOT.TFile(monitorDB+"/outfile.root","UPDATE")
# houtFile.cd()
# hUsage.Write("%s_%s"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
# houtFile.Close()
'''==============================================================================
CRB usage plots
=============================================================================='''
ROOT.gPad.SetLogy(0)
hCRB = ROOT.TH1F("CRBUsage","Data Usage",17,-1.5,15.5)
hZeroOne = ROOT.TH1F("CRBZeroOne","Zero and One Bin",100,0,1.);
hTime = ROOT.TH1F("time","time",100,-0.1,1.1);
if not rc:
ROOT.MitRootStyle.InitHist(hCRB,"","",kBlack)
ROOT.MitRootStyle.InitHist(hZeroOne,"","",kBlack)
ROOT.MitRootStyle.InitHist(hTime,"","",kBlack)
titles = "; <n_{accesses}>; Prorated data volume [TB]"
hCRB.SetTitle(titles)
hZeroOne.SetTitle(titles)
titles = "; Prorated Time Fraction; Data volume [TB]"
hTime.SetTitle(titles)
cCRB = ROOT.TCanvas("c2","c2",800,800)
cZeroOne = ROOT.TCanvas("c3","c3",800,800)
cTime = ROOT.TCanvas("c4","c4",800,800)
for datasetName,datasetObject in datasetSet.iteritems():
if datasetObject.cTime>end:
continue
if not re.match(datasetPattern,datasetName):
continue
if datasetObject.nFiles==0:
            # skip datasets with no files
continue
sizeGB = datasetObject.sizeGB
for siteName in datasetObject.movement:
if not re.match(sitePattern,siteName):
continue
timeOnSite = timeOnSites[datasetName][siteName]
# print timeOnSite
value = 0
if siteName in datasetObject.nAccesses:
for utime,n in datasetObject.nAccesses[siteName].iteritems():
if utime <= end and utime >= start:
value += float(n)/datasetObject.nFiles
fillValue = min(max(1,value), 14.5)
# if value < 1:
# print value,fillValue
if value == 0:
if datasetObject.cTime > start:
fillValue = 0
else:
fillValue = -1
weight = float(sizeGB * timeOnSite)/1000.
# print datasetObject
# print fillValue,weight
# sys.exit(-1)
hCRB.Fill(fillValue,weight)
if (fillValue == 0) or (fillValue == 1):
hZeroOne.Fill(value,weight)
hTime.Fill(timeOnSite,sizeGB/1000.)
try:
histColor = os.environ['MONITOR_COLOR']
hCRB.SetLineColor(histColor)
hTime.SetLineColor(histColor)
hZeroOne.SetLineColor(histColor)
except KeyError:
pass
if crbLabel!='':
print 'Updating',rootFile
fSave = ROOT.TFile(rootFile,'UPDATE')
histName = 'h_'+os.environ['MONITOR_PLOTTEXT']
fSave.WriteTObject(hCRB,histName,"Overwrite")
# fSave.WriteTObject(hTime,histName+'_time','Overwrite')
fSave.Close()
xaxis = hCRB.GetXaxis()
xaxis.SetBinLabel(1,"0 old")
xaxis.SetBinLabel(2,"0 new")
xaxis.SetBinLabel(3,"< 1")
xaxis.SetBinLabel(4,"2")
xaxis.SetBinLabel(5,"3")
xaxis.SetBinLabel(6,"4")
xaxis.SetBinLabel(7,"5")
xaxis.SetBinLabel(8,"6")
xaxis.SetBinLabel(9,"7")
xaxis.SetBinLabel(10,"8")
xaxis.SetBinLabel(11,"9")
xaxis.SetBinLabel(12,"10")
xaxis.SetBinLabel(13,"11")
xaxis.SetBinLabel(14,"12")
xaxis.SetBinLabel(15,"13")
xaxis.SetBinLabel(16,"14")
xaxis.SetBinLabel(17,">14")
cCRB.cd()
hCRB.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
ROOT.MitRootStyle.AddText("Overflow in last bin.")
totalSize = hCRB.Integral()
integralTexts = ["Period: [%s, %s]\n"%( strftime("%Y-%m-%d",gmtime(start)) , strftime("%Y-%m-%d",gmtime(end)) )]
integralTexts.append( "Average data on disk: %.3f PB\n"%(totalSize/1000.) )
positions = [0.8,0.75]
plotTText = [None,None]
for i in range(2):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
if groupPattern == ".*":
groupPattern = "All"
try:
hCRB.SaveAs(monitorDB+"/CRBUsage_%s_%s.C"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
cCRB.SaveAs(monitorDB+"/CRBUsage_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cCRB.SaveAs(monitorDB+"/CRBUsage_%s_%i_%i.png"%(groupPattern,start,end))
cZeroOne.cd()
hZeroOne.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
plotTText = [None,None]
for i in range(1):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
try:
cZeroOne.SaveAs(monitorDB+"/CRB01_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cZeroOne.SaveAs(monitorDB+"/CRB01_%s_%i_%i.png"%(groupPattern,start,end))
cTime.cd()
hTime.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
plotTText = [None,None]
for i in range(1):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
try:
cTime.SaveAs(monitorDB+"/CRBTime_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cTime.SaveAs(monitorDB+"/CRBTime_%s_%i_%i.png"%(groupPattern,start,end))
if pickleJar:
pickleJar.close()
for h in [hUsage,hCRB,hZeroOne,hTime]:
h.Delete()
if __name__ == '__main__':
'''==============================================================================
M A I N
=============================================================================='''
debug = 0
sizeAnalysis = True
addNotAccessedDatasets = True
usage = "\n"
usage += " plottingWrapper.py <sitePattern> <startDate> <endDate> <pickleJar>\n"
usage += "\n"
usage += " sitePattern - pattern to select particular sites (ex. T2* or T2_U[SK]* etc.)\n"
usage += " startDate - epoch time of starting date\n"
usage += " endDate - epoch time of ending date\n"
usage += " pickleJar - *.pkl file containing the relevant aggregated data\n"
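    # Example invocation (illustrative values; the pickle file name is hypothetical):
    #   ./plotFromPickle.py "T2_*" 1378008000 1391212800 monitorCache.pkl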
# decode command line parameters
crbLabel = ''
rootFile = ''
if len(sys.argv)>=5:
sitePattern = str(sys.argv[1])
start = max(genesis,int(sys.argv[2]))
end = min(nowish,int(sys.argv[3]))
jarFileName = str(sys.argv[4])
if len(sys.argv)>=6:
crbLabel = str(sys.argv[5])
rootFile = str(sys.argv[6])
makeSummaryPlots = False
elif len(sys.argv)==3:
sitePattern = sys.argv[1]
start = genesis
end = nowish
jarFileName = sys.argv[2]
makeSummaryPlots = True
else:
sys.stderr.write(' ERROR - wrong number of arguments\n')
sys.stderr.write(usage)
sys.exit(2)
makeActualPlots(sitePattern,start,end,jarFileName,crbLabel,rootFile,makeSummaryPlots)
| mit | 1,943,402,565,564,367,600 | 36.927083 | 151 | 0.564039 | false |
relekang/accio | accio/webhooks/tests.py | 1 | 1727 | import pytest
@pytest.fixture
def push_based_project(accio_project):
accio_project.deploy_on = 'push'
accio_project.save()
return accio_project
@pytest.fixture
def status_based_project(accio_project):
accio_project.deploy_on = 'status'
accio_project.save()
return accio_project
@pytest.mark.django_db
def test_github_push_should_deploy(mock_runners, push_based_project, github_webhooks):
response = github_webhooks(name='push', event='push')
assert response.content.decode() == 'Deploy queued'
assert response.status_code == 200
@pytest.mark.django_db
def test_github_push_should_not_deploy_other_branch(push_based_project, github_webhooks):
response = github_webhooks(name='push_other_branch', event='push')
assert response.content.decode() == 'Not on master branch'
assert response.status_code == 400
@pytest.mark.django_db
def test_github_status_success_should_deploy(mock_runners, status_based_project, github_webhooks):
response = github_webhooks(name='status_success', event='status')
assert response.content.decode() == 'Deploy queued'
assert response.status_code == 200
@pytest.mark.django_db
def test_github_status_failure_should_not_deploy(status_based_project, github_webhooks):
response = github_webhooks(name='status_failure', event='status')
assert response.content.decode() == 'Status is not success'
assert response.status_code == 400
@pytest.mark.django_db
def test_github_status_not_master_should_not_deploy(status_based_project, github_webhooks):
response = github_webhooks(name='status_not_master', event='status')
assert response.content.decode() == 'Not on master branch'
assert response.status_code == 400
| mit | -6,829,933,572,956,118,000 | 33.54 | 98 | 0.7348 | false |
opensource-expert/customers-formula | customers/customers_passwords.py | 1 | 4681 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: set ft=python:
#
# python password generator for customers
#
# Depend: pwqgen
#
# Usage:
# ./customers_passwords.py customers_top pillarpath/to/customers.sls pillarpath/user_passwords.yaml
#
# Output: Int, the number of created passwords in pillarpath/user_passwords.sls
#
# fileformat:
# /!\ input files are PURE yaml file format, no jinja
#
# customers.sls:
# customers_top:
# customers:
# client1: <-- key used for password match as username
# […]
# client2:
# client3:
#
# user_passwords.yaml: will be overwritten if any, don't put anything non-yaml
# client1:
# mysql: bla
# shell: piou
# websmtp: somepass_for_controling_email_from_the_web
# hash: $1$17391272$rgWtYpRIDVUrT202c89Fp1
# client2:
# mysql: somepassword
# shell: shelllpassword
# websmtp: my_web_pass_for_mail
# hash: $1$17391272$rgWtYpRIDVUrT202c89Fp1
# # one entry per customer name…
# unittest: See ../tests/test_customers_passwords.py
from __future__ import absolute_import
import subprocess
import sys
import yaml
import random
from collections import OrderedDict
def random_pass():
res = subprocess.check_output(["pwqgen"]).rstrip()
return res
def unix_pass(password):
saltpw = str(random.randint(2**10, 2**32))
args = ['openssl', 'passwd', '-1', '-salt', saltpw, password]
res = subprocess.check_output(args).rstrip()
return res
def read_yaml(filename):
f = open(filename)
data = yaml.safe_load(f)
f.close()
return data
def create_all_pass():
"""
    return an OrderedDict of all passwords
new_pass['mysql'] = random_pass()
new_pass['shell'] = shell_pass
new_pass['hash'] = unix_pass(shell_pass)
"""
shell_pass = random_pass()
new_pass = OrderedDict()
new_pass['mysql'] = random_pass()
new_pass['shell'] = shell_pass
new_pass['websmtp'] = random_pass()
new_pass['hash'] = unix_pass(shell_pass)
return new_pass
def write_password_db_yaml(fname, passDB):
"""
    write the ordered password db in a YAML-compatible way.
"""
f = open(fname, 'w')
for u, passwd in passDB.items():
f.write("%s:\n" % u)
for k in passwd.keys():
f.write(" %s: %s\n" % (k, passwd[k]))
    # this outputter has some difficulties with OrderedDict
# f.write(yaml.dump(passDB, default_flow_style=False))
f.close()
def update_missing_fields(passDB, force_hash=False):
"""
check for missing fields, if new fields have been added
loop over all fields, and complete if any.
if force_hash is True, recompute hashes
return number of updated records
"""
    # fetch the field names; the generated password values themselves are ignored
fields = create_all_pass().keys()
n = 0
for u, passwd in passDB.items():
        # check for newly added, possibly missing fields
        for p in fields:
            # read this password field
myp = passwd.get(p)
if (myp == None or myp == '') or (force_hash and p == 'hash'):
if p == 'hash':
hashed = unix_pass(passDB[u]['shell'])
passDB[u]['hash'] = hashed
elif p == 'shell':
# reset hash, will be computed in next loop
passDB[u]['hash'] = None
passDB[u][p] = random_pass()
else:
passDB[u][p] = random_pass()
# we have modified some entries
n += 1
return n
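# Illustrative example of update_missing_fields (hypothetical data): if
# passDB = {'client1': {'mysql': 'a', 'shell': 'b', 'hash': '$1$...'}} lacks the
# 'websmtp' field, the call fills it with a fresh random password and returns a
# count > 0; passing force_hash=True also recomputes the 'hash' entries.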
def main(customers_top, user_db, password_db):
userDB = read_yaml(user_db)
    # we can handle a nonexistent password file
try:
passDB = read_yaml(password_db)
    except IOError:
passDB = {}
# hardcoded path to access data for customers
mysql_users = userDB[customers_top]['customers'].keys()
    # key names matching usernames are top level
if passDB:
user_with_pass = passDB.keys()
else:
# empty
user_with_pass = []
passDB = {}
missing_password = set(mysql_users) - set(user_with_pass)
n = 0
# add missing passwords
for u in missing_password:
passDB[u] = create_all_pass()
n += 1
# update is some new fields has been added
n += update_missing_fields(passDB)
# write back modified yaml
if n > 0:
write_password_db_yaml(password_db, passDB)
# return number of new created password entries
return n
if __name__ == '__main__':
customers_top = sys.argv[1]
user_db = sys.argv[2]
password_db = sys.argv[3]
print(main(customers_top, user_db, password_db))
| gpl-3.0 | -1,138,239,243,298,611,100 | 26.511765 | 100 | 0.598888 | false |
rickypc/dotfiles | .rflint.d/table.py | 1 | 3998 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Robot Lint Rules - Lint rules for Robot Framework data files.
# Copyright (c) 2014, 2015, 2016 Richard Huang <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Robot Lint Rules - Lint rules for Robot Framework data files.
"""
from rflint.common import GeneralRule, WARNING
from rflint.parser import KeywordTable, Row, TestcaseTable
def _get_count(rows, has_steps=False):
"""Returns total breaklines."""
count = 0
rows = list(rows)
rows.reverse()
for row in rows:
if has_steps:
count += _get_count(row.steps)
if count > 0:
break
else:
line = row.cells if isinstance(row, Row) else row
if _is_breakline(line):
count += 1
else:
break
return count
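# Illustrative example (plain lists standing in for parser rows): calling
# _get_count([['Log', 'hello'], [''], ['']]) walks the rows in reverse and stops
# at the first non-blank row, so it reports 2 trailing blank lines.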
def _get_rows(table):
"""Returns rows and step indicator."""
response = {
'has_steps': False,
'rows': [],
}
if isinstance(table, KeywordTable):
response['has_steps'] = True
response['rows'] = table.keywords
elif isinstance(table, TestcaseTable):
response['has_steps'] = True
response['rows'] = table.testcases
else:
response['rows'] = table.rows
return response
def _get_total(rows, has_steps=False):
"""Returns total rows and steps if applicable."""
total = len(rows)
if has_steps:
total += sum([len(row.statements) for row in rows])
return total
def _is_breakline(statement):
"""Returns True if statement is a breakline, False otherwise."""
return len(statement) == 1 and statement[0].strip() == ''
class TooFewTableBlankLines(GeneralRule):
"""Warn about tables without blank lines between each other.
"""
max_allowed = 1
message = 'Too few trailing blank lines in "%s" table.'
severity = WARNING
def apply(self, robot_file):
"""Apply the rule to given robot file."""
for table in robot_file.tables[:-1]:
response = _get_rows(table)
count = _get_count(**response)
total = _get_total(**response)
if count < self.max_allowed:
linenumber = table.linenumber + total
self.report(robot_file, self.message % table.name,
linenumber + self.max_allowed, 0)
def configure(self, max_allowed):
"""Configures the rule."""
self.max_allowed = int(max_allowed)
class TooManyTableBlankLines(GeneralRule):
"""Warn about tables with extra blank lines between each other.
"""
max_allowed = 1
message = 'Too many trailing blank lines in "%s" table.'
severity = WARNING
def apply(self, robot_file):
"""Apply the rule to given robot file."""
for table in robot_file.tables[:-1]:
response = _get_rows(table)
count = _get_count(**response)
total = _get_total(**response)
if count > self.max_allowed:
linenumber = (table.linenumber + total) - count
self.report(robot_file, self.message % table.name,
linenumber + self.max_allowed, 0)
def configure(self, max_allowed):
"""Configures the rule."""
self.max_allowed = int(max_allowed)
| mit | -4,098,794,181,597,314,600 | 32.316667 | 84 | 0.612806 | false |
IntelLabs/hpat | docs/source/buildscripts/user_guide_gen.py | 1 | 12520 | # -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from module_info import get_function, get_method_attr, get_function_doc, get_function_short_description
from module_info import create_header_str
from pandas_info import get_pandas_modules, init_pandas_logging
from sdc_info import get_sdc_modules, init_sdc_logging
from texttable import Texttable
import os
PANDAS_API_STR = 'Pandas API: ' # This substring prepends Pandas API name in the documentation
APIREF_RELPATH = r'./_api_ref/' # Relative path to API Reference folder
RST_MODULES = {
'api_reference.rst': ['pandas'],
'io.rst': ['pandas.io.api', 'pandas.io.clipboards', 'pandas.io.common', 'pandas.io.excel',
'pandas.io.feather_format', 'pandas.io.formats.console', 'pandas.io.formats.format',
'pandas.io.formats.printing', 'pandas.io.gbq', 'pandas.io.html', 'pandas.io.json',
'pandas.io.msgpack', 'pandas.io.msgpack.exceptions', 'pandas.io.packers', 'pandas.io.parquet',
'pandas.io.parsers', 'pandas.io.pickle', 'pandas.io.pytables', 'pandas.io.sas',
'pandas.io.sas.sasreader', 'pandas.io.spss', 'pandas.io.sql', 'pandas.io.stata'],
'series.rst': ['pandas.Series'],
'dataframe.rst': ['pandas.DataFrame'],
''
'general_functions.rst': [],
}
pandas_modules = [] # List of Pandas submodules along with its functions and classes
sdc_modules = [] # List of Intel SDC submodules along with its functions and classes
def generate_module_doc(the_module):
module_doc = None
module_name = the_module['module_name']
# First, look up if there is RST file documenting particular module
for rst in RST_MODULES:
for mod in RST_MODULES[rst]:
if mod == module_name:
                return module_doc  # If there is documentation for the given module then just return
# If there is no RST file then we create the documentation based on module's docstring
module_obj = the_module['module_object']
module_description = get_function_short_description(module_obj).strip()
if module_description is None:
module_description = ''
module_doc = module_description + '\n\nFor details please refer to Pandas API Reference for :py:mod:`' + \
module_name + '`\n\n'
return module_doc
def generate_api_index_for_module(the_module):
module_description = generate_module_doc(the_module)
if module_description is None:
module_description = ''
module_doc = ''
module_header_flag = False
# Document functions first, if any
tab = Texttable()
for func in the_module['functions']: # Iterate through the module functions
name = func['function_name']
obj = getattr(the_module['module_object'], name) # Retrieve the function object
description = get_function_short_description(obj).strip()
tab.add_rows([[name, description]], header=False)
module_name = ''
func_doc = tab.draw()
if func_doc and func_doc != '': # If the function list is not empty then add module name to the document
module_name = the_module['module_name']
module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \
create_header_str('Functions:', '-') + \
'\n\n' + func_doc + '\n\n'
module_header_flag = True
# Document classes
classes_header_flag = False
for the_class in the_module['classes']: # Iterate through the module classes
tab.reset()
class_name = the_class['class_name']
class_obj = the_class['class_object']
class_description = class_obj.__doc__
if not class_description:
class_description = ''
class_doc = ''
class_header_flag = False
# Document class attributes first, if any
for attr in the_class['class_attributes']: # Iterate through the class attributes
name = attr
obj = getattr(the_class['class_object'], name) # Retrieve the attribute object
description = get_function_short_description(obj).strip()
tab.add_rows([[name, description]], header=False)
attr_doc = tab.draw()
if attr_doc and attr_doc != '': # If the attribute list is not empty then add class name to the document
class_header_flag = True
class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \
create_header_str('Attributes:', '+') + \
'\n\n' + attr_doc + '\n\n'
# Document class methods, if any
for method in the_class['class_methods']: # Iterate through the class methods
name = method
obj = getattr(the_class['class_object'], name) # Retrieve the method object
description = get_function_short_description(obj).strip()
tab.add_rows([[name, description]], header=False)
method_doc = tab.draw()
if method_doc and method_doc != '': # If the method list is not empty then add class name to the document
if not class_header_flag:
class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \
create_header_str('Methods:', '+') + \
'\n\n' + method_doc + '\n\n'
class_header_flag = True
else:
class_doc += create_header_str('Methods:', '+') + \
'\n\n' + method_doc + '\n\n'
if not module_header_flag: # There is no module header yet
if class_header_flag: # There were methods/attributes for the class
module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \
create_header_str('Classes:', '-') + \
'\n\n' + class_doc + '\n\n'
module_header_flag = True
classes_header_flag = True
else: # The module header has been added
if class_header_flag: # There are new methods/attributes for the class
if not classes_header_flag: # First class of the module description
module_doc += create_header_str('Classes:', '-') + '\n\n'
module_doc += '\n\n' + class_doc + '\n\n'
return module_doc
def get_module_rst_fname(the_module):
file_name = the_module['module_name']
file_name = file_name.replace('.', '/')
file_name = APIREF_RELPATH + file_name + '.rst'
return file_name
def generate_api_index():
doc = '.. _apireference::\n\nAPI Reference\n*************\n\n' \
'.. toctree::\n :maxdepth: 1\n\n'
for the_module in pandas_modules: # Iterate through pandas_modules
module_doc = generate_api_index_for_module(the_module)
if len(module_doc) > 0:
file_name = get_module_rst_fname(the_module)
write_rst(file_name, module_doc)
doc += ' ' + file_name + '\n'
return doc
def generate_sdc_object_doc(sdc_func):
sdc_titled_sections = get_function_doc(sdc_func, True)
sdc_see_also_text = next((sec['text'] for sec in sdc_titled_sections
if sec['title'].lower().strip() == 'see also'), '')
sdc_limitations_text = next((sec['text'] for sec in sdc_titled_sections
if sec['title'].lower().strip() == 'limitations'), '')
sdc_examples_text = next((sec['text'] for sec in sdc_titled_sections
if sec['title'].lower().strip() == 'examples'), '')
# Get respective Pandas API name
pandas_name = sdc_titled_sections[0]['text'].strip()
pandas_name = pandas_name.replace(PANDAS_API_STR, '')
pandas_name = pandas_name.replace('\n', '')
# Find respective Pandas API
doc_object = get_method_attr(pandas_name, pandas_modules)
if not doc_object:
doc_object = get_function(pandas_name, pandas_modules)
if not doc_object:
raise NameError('Pandas API:' + pandas_name + 'does not exist')
# Extract Pandas API docstring as the list of sections
pandas_titled_sections = []
if doc_object:
pandas_titled_sections = get_function_doc(doc_object, False)
# Form final docstring which is a combination of Pandas docstring for the description, Parameters section,
# Raises section, Returns section. See Also, Limitations and Examples sections (if any) are taken from SDC docstring
short_description_section = pandas_titled_sections[0]['text'] + '\n\n'
pandas_titled_sections.pop(0)
long_description_section = ''
while pandas_titled_sections[0]['title'] == '':
long_description_section += pandas_titled_sections[0]['text'] + '\n\n'
pandas_titled_sections.pop(0)
raises_section = parameters_section = returns_section = see_also_section = \
limitations_section = examples_section = ''
for section in pandas_titled_sections:
title = section['title'].lower().strip()
if title == 'raises':
raises_section = 'Raises\n------\n\n' + section['text'] + '\n\n'
elif title == 'parameters':
parameters_section = 'Parameters\n----------\n\n' + section['text'] + '\n\n'
elif title == 'return' or title == 'returns':
returns_section = 'Returns\n-------\n\n' + section['text'] + '\n\n'
if sdc_see_also_text:
see_also_section = '\n.. seealso::\n\n' + sdc_see_also_text + '\n\n'
if sdc_limitations_text:
limitations_section = 'Limitations\n-----------\n\n' + sdc_limitations_text + '\n\n'
if sdc_examples_text:
examples_section = 'Examples\n-----------\n\n' + sdc_examples_text + '\n\n'
rst_label = pandas_name.replace('.', '_')
n = len(pandas_name)
docstring = \
'.. _' + rst_label + ':\n\n' + \
pandas_name + '\n' + '*'*n + '\n' + \
short_description_section + \
long_description_section + \
parameters_section + \
returns_section + \
raises_section + \
limitations_section + \
examples_section + \
see_also_section
file_name = rst_label + '.rst'
return file_name, docstring
def write_rst(file_name, docstring):
directory = os.path.dirname(file_name)
if len(directory) > 0 and not os.path.exists(directory):
os.makedirs(directory)
file = open(file_name, 'w')
file.write(docstring)
file.close()
if __name__ == "__main__":
init_pandas_logging()
pandas_modules = get_pandas_modules()
init_sdc_logging()
sdc_modules = get_sdc_modules()
for the_module in sdc_modules:
if the_module['module_name'] == 'sdc.datatypes.hpat_pandas_series_functions':
for func in the_module['functions']:
file_name, doc = generate_sdc_object_doc(func['function_object'])
write_rst(APIREF_RELPATH + file_name, doc)
doc = generate_api_index()
write_rst('apireference.rst', doc)
| bsd-2-clause | 2,918,795,782,654,178,000 | 43.714286 | 120 | 0.610304 | false |
jpruf/building-permits-geo | pipeline/tasks/convert_to_dict.py | 1 | 1327 | import os
import csv
import json
TSV_FOLDER = "../data/tsv/"
FIELDNAMES = ("tract", "apn", "issue_date", "final_date", "lot", "permit_number", "owner",
"contractor", "applicant", "location", "approval_status", "sub_code",
"sub_code_description", "work_code", "work_code_description", "census_code",
"permit_valuation", "reroof_valuation", "square_feet", "units", "rsn", "pool",
"sewer", "enterprise", "permit_flag")
def clean_and_annotate(row, label):
title = label.split('_')
row["year"] = title[1]
row["type"] = title[2]
return row
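# Illustrative example (hypothetical file name): a file "permits_2014_commercial.txt"
# yields the label "permits_2014_commercial", so clean_and_annotate tags every row
# from that file with year "2014" and type "commercial".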
def convert_to_dicts(label):
with open(TSV_FOLDER + label + '.txt', 'rU') as tsv_input:
tsv_reader = csv.DictReader(tsv_input, fieldnames=FIELDNAMES, delimiter='\t')
# Skip the first line of the CSV file, which contains the headers
next(tsv_reader)
return [clean_and_annotate(row, label) for row in tsv_reader]
def run():
permits = {}
# Go through all of the files, and convert them into arrays of dicts
for file_name in os.listdir(TSV_FOLDER):
if file_name.endswith(".txt"):
label = file_name.strip(".txt")
permits_for_file = convert_to_dicts(label)
permits[label] = permits_for_file
return permits | mit | 3,859,655,520,245,001,700 | 39.242424 | 92 | 0.600603 | false |
ymap/aioredis | tests/connection_commands_test.py | 1 | 3447 | import pytest
import asyncio
from aioredis import ConnectionClosedError, ReplyError
from aioredis.pool import ConnectionsPool
from aioredis import Redis
@pytest.mark.run_loop
async def test_repr(create_redis, loop, server):
redis = await create_redis(
server.tcp_address, db=1, loop=loop)
assert repr(redis) in {
'<Redis <RedisConnection [db:1]>>',
'<Redis <ConnectionsPool [db:1, size:[1:10], free:1]>>',
}
redis = await create_redis(
server.tcp_address, db=0, loop=loop)
assert repr(redis) in {
'<Redis <RedisConnection [db:0]>>',
'<Redis <ConnectionsPool [db:0, size:[1:10], free:1]>>',
}
@pytest.mark.run_loop
async def test_auth(redis):
expected_message = "ERR Client sent AUTH, but no password is set"
with pytest.raises(ReplyError, match=expected_message):
await redis.auth('')
@pytest.mark.run_loop
async def test_echo(redis):
resp = await redis.echo('ECHO')
assert resp == b'ECHO'
with pytest.raises(TypeError):
await redis.echo(None)
@pytest.mark.run_loop
async def test_ping(redis):
assert await redis.ping() == b'PONG'
@pytest.mark.run_loop
async def test_quit(redis, loop):
expected = (ConnectionClosedError, ConnectionError)
try:
assert b'OK' == await redis.quit()
except expected:
pass
if not isinstance(redis.connection, ConnectionsPool):
        # reader task may not have been cancelled yet and _do_close not called
# so the ConnectionClosedError may be raised (or ConnectionError)
with pytest.raises(expected):
try:
await redis.ping()
except asyncio.CancelledError:
assert False, "Cancelled error must not be raised"
# wait one loop iteration until it get surely closed
await asyncio.sleep(0, loop=loop)
assert redis.connection.closed
with pytest.raises(ConnectionClosedError):
await redis.ping()
@pytest.mark.run_loop
async def test_select(redis):
assert redis.db == 0
resp = await redis.select(1)
assert resp is True
assert redis.db == 1
assert redis.connection.db == 1
@pytest.mark.run_loop
async def test_encoding(create_redis, loop, server):
redis = await create_redis(
server.tcp_address,
db=1, encoding='utf-8',
loop=loop)
assert redis.encoding == 'utf-8'
@pytest.mark.run_loop
async def test_yield_from_backwards_compatability(create_redis, server, loop):
redis = await create_redis(server.tcp_address, loop=loop)
assert isinstance(redis, Redis)
# TODO: there should not be warning
# with pytest.warns(UserWarning):
with await redis as client:
assert isinstance(client, Redis)
assert client is not redis
assert await client.ping()
@pytest.redis_version(4, 0, 0, reason="SWAPDB is available since redis>=4.0.0")
@pytest.mark.run_loop
async def test_swapdb(create_redis, start_server, loop):
server = start_server('swapdb_1')
cli1 = await create_redis(server.tcp_address, db=0, loop=loop)
cli2 = await create_redis(server.tcp_address, db=1, loop=loop)
await cli1.flushall()
assert await cli1.set('key', 'val') is True
assert await cli1.exists('key')
assert not await cli2.exists('key')
assert await cli1.swapdb(0, 1) is True
assert not await cli1.exists('key')
assert await cli2.exists('key')
| mit | -7,592,511,956,837,837,000 | 28.211864 | 79 | 0.660865 | false |
chrys87/speech-bridge | src/speechbridge/core/settingsManager.py | 1 | 4438 | #!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributers.
import importlib.util
import os
import __main__
from configparser import ConfigParser
from core import environment
from core import inputEvent
from core.settings import settings
from core import debug
class settingsManager():
def __init__(self):
self.settings = settings
def initialize(self, environment):
self.env = environment
def shutdown(self):
pass
def loadSettings(self, settingConfigPath):
if not os.path.exists(settingConfigPath):
return False
self.env['settings'] = ConfigParser()
self.env['settings'].read(settingConfigPath)
return True
def setSetting(self, section, setting, value):
self.env['settings'].set(section, setting, value)
def getSetting(self, section, setting):
value = ''
try:
value = self.env['settings'].get(section, setting)
except:
value = str(self.settings[section][setting])
return value
def getSettingAsInt(self, section, setting):
value = 0
try:
value = self.env['settings'].getint(section, setting)
except:
value = self.settings[section][setting]
return value
def getSettingAsFloat(self, section, setting):
value = 0.0
try:
value = self.env['settings'].getfloat(section, setting)
except:
value = self.settings[section][setting]
return value
def getSettingAsBool(self, section, setting):
value = False
try:
value = self.env['settings'].getboolean(section, setting)
except:
value = self.settings[section][setting]
return value
def loadDriver(self, driverName, driverType):
try:
if self.env['runtime'][driverType] != None:
self.env['runtime'][driverType].shutdown(self.env)
spec = importlib.util.spec_from_file_location(driverName, os.path.dirname(os.path.realpath(__main__.__file__)) + "/" + driverType + '/' + driverName + '.py')
driver_mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(driver_mod)
self.env['runtime'][driverType] = driver_mod.driver()
self.env['runtime'][driverType].initialize(self.env)
self.env['runtime']['debug'].writeDebugOut('Loading Driver ' + driverType +" OK",debug.debugLevel.INFO, onAnyLevel=True)
except Exception as e:
self.env['runtime'][driverType] = None
self.env['runtime']['debug'].writeDebugOut("Loading " + driverType + " Driver : "+ str(e), debug.debugLevel.ERROR)
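    # Illustrative use of loadDriver (driver and type names are hypothetical):
    #   settingsManager.loadDriver('espeakDriver', 'speechDriver') would import
    #   speechDriver/espeakDriver.py, instantiate its driver class and store it
    #   in env['runtime']['speechDriver'].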
def shutdownDriver(self, driverType):
if self.env['runtime'][driverType] == None:
return
self.env['runtime'][driverType].shutdown()
del self.env['runtime'][driverType]
def initSpeechBridgeConfig(self, environment = environment.environment, settingsRoot = '/etc/speechBridge/', settingsFile='settings.conf'):
environment['runtime']['debug'] = debug.debug()
environment['runtime']['debug'].initialize(environment)
if not os.path.exists(settingsRoot):
if os.path.exists(os.path.dirname(os.path.realpath(__main__.__file__)) +'/../../config/'):
settingsRoot = os.path.dirname(os.path.realpath(__main__.__file__)) +'/../../config/'
else:
return None
environment['runtime']['settingsManager'] = self
environment['runtime']['settingsManager'].initialize(environment)
validConfig = environment['runtime']['settingsManager'].loadSettings(settingsRoot + '/settings/' + settingsFile)
if not validConfig:
return None
environment['runtime']['debug'].writeDebugOut('\/-------environment-------\/',debug.debugLevel.INFO, onAnyLevel=True)
environment['runtime']['debug'].writeDebugOut(str(environment),debug.debugLevel.INFO, onAnyLevel=True)
environment['runtime']['debug'].writeDebugOut('\/-------settings.conf-------\/',debug.debugLevel.INFO, onAnyLevel=True)
environment['runtime']['debug'].writeDebugOut(str(environment['settings']._sections
),debug.debugLevel.INFO, onAnyLevel=True)
return environment
| lgpl-3.0 | 4,906,282,375,151,863,000 | 40.092593 | 169 | 0.616043 | false |
hughsons/saltwaterfish | admin/views.py | 1 | 28274 | from django.http import *
from forms import UploadForm
from django import template
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.utils.decorators import method_decorator
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView, View
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User, Group, Permission
from models import *
from django.db import models
from django.db.models import Count, Min, Sum, Max, Avg
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import unittest
from django.db import connection, transaction
import logging
import hashlib
from google.appengine.api import files
try:
files.gs
except AttributeError:
import gs
files.gs = gs
PERPAGE=50
def checkadminlogin_dispatch(f):
def wrap(request, *args, **kwargs):
if 'IsLogin' in request.session and request.session['IsLogin'] and 'Staff' in request.session and request.session['Staff'].username !="":
staff_list = Admins.objects.filter(username = request.session['Staff_username'], pass_field = hashlib.md5(request.session['Staff_password']).hexdigest())
if staff_list:
request.session['IsLogin'] = True
request.session['Staff'] = staff_list[0]
success = True
else:
return HttpResponseRedirect('/logout')
logging.info('Fetch Started:: %s', staff_list[0])
else:
return HttpResponseRedirect('/logout')
return f(request, *args, **kwargs)
return wrap
class CsrfExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CsrfExemptMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
@method_decorator(checkadminlogin_dispatch)
def dispatch(self,request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
@csrf_exempt
def render_template(request, template, data=None):
errs =""
if request.method == 'GET' and 'err' in request.GET:
data.update({'errs':request.GET['err']})
response = render_to_response(template, data,
context_instance=RequestContext(request))
return response
class CMSClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Extrapages.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Extrapages.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "cms_pages.htm", content)
class CMSEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['pageid']
allpages = Extrapages.objects.get(id=pageid)
content = {'page_title': "Summary",
'allpages':allpages,
}
return render_template(request, "cms_pages_edit.htm", content)
class EmailViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Emails.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Emails.objects.all()[offset-100:offset]
content = {'page_title': "Admin :: Email List",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "email_pages.htm", content)
class EmailEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['id']
allpages = Emails.objects.get(id=pageid)
content = {'page_title': "Admin::Email Edit",
'allpages':allpages,
}
return render_template(request, "email_pages_edit.htm", content)
class CMSAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Summary",}
return render_template(request, "cms_pages_add.htm", content)
class TitlesContentClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Html.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Html.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "titles_content.htm", content)
class ProductWishListClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
#allitems = ProductWaitinglist.objects.annotate(dcount=Count('catalogid')).values('catalogid',
# 'current_stock',
# 'products__catalogid').all()[offset-100:offset]
allitems = ProductWaitinglist.objects.raw('select count(*) as dcount,product_waitinglist.catalogid,products.id,name,current_stock from product_waitinglist,products where product_waitinglist.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductWaitinglist.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_wish_list.htm", content)
class ProductWishViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
itemid = request.GET['itemid']
allitems = ProductWaitinglist.objects.filter(catalogid=itemid).all()[offset-100:offset]
count = ProductWaitinglist.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_wish_list_view_list.htm", content)
class ReviewAllClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = ProductReview.objects.raw('select count(*) as dcount,product_review.catalogid,products.id,name,thumbnail from product_review, products where product_review.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductReview.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_7_reviews.htm", content)
class ProductsReviewsViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.filter(catalogid=itemid).all()
count = ProductReview.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'itemid':itemid,
}
return render_template(request, "products_review_view_list.htm", content)
class ProductsReviewEditFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.get(id=itemid)
content = {'page_title': "Summary",
'allitems':allitems,
#'count':count,
#'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_7_reviews_edit_2_edit.htm", content)
class ApanelViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Profile",}
return render_template(request, "home-page-admin.htm", content)
class CustomersViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = customers.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':customers.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "customers.htm", content)
class CRMViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
if 'status' in request.GET and request.GET['status'] != "":
status = request.GET['status']
else:
status = 1
count = Crm.objects.filter(status=status).count()
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Crm.objects.all().filter(status=status)[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "crm.htm", content)
class CRMEditViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
crmid = request.GET['id']
allitems = Crm.objects.get(id=crmid)
categories = ProductCategory.objects.all()
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "crm_edit.htm", content)
class StaffViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Site Staff",
'customers':Admins.objects.all()[:100],
'count':Admins.objects.count(),}
return render_template(request, "admins.htm", content)
class CategoryViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Category.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':Category.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "categories.htm", content)
class CustomerAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'title': "Add Customer",}
return render_template(request, "customer_add.htm", content)
class CustomerInfoClass(LoginRequiredMixin,TemplateView):
#summary = Customers.objects.all()
def get(self, request, *args, **kwargs):
cid = request.GET['id']
customer = customers.objects.get(contactid=cid)
customeremail= customer.email
customerrewards = CustomerRewards.objects.filter(contactid=cid).all()
totalrewards = CustomerRewards.objects.filter(contactid=cid).aggregate(Sum('points'))
#customers_promocode = SwfCustomerCreditsLog.objects.values_list('customers_promocode', flat=True)
#customers_promocode = customers_promocode['customers_promocode']
#storerewards = SwfCustomerCreditsLog.objects.filter(customers_email_address=customeremail)
storerewards = SwfCustomerCreditsLog.objects.raw('select *,swf_customer_credits_log.id as sid from swf_customer_credits_log , promotions where customers_promocode = coupon AND customers_email_address="'+customeremail+'" AND customers_promocode != ""')
fulldata = list(storerewards)
try:
wish_id = WshWishlist.objects.get(customerid=cid)
wishitems = WsiWishlistitems.objects.filter(wsh_id=wish_id.wsh_id)
except Exception as e:
wishitems = ""
content = {'page_title': "Customers Info",
'customer': customer,
'customerorders':Orders.objects.filter(ocustomerid=cid).all(),
'wishlists':wishitems,
'customerrewards':customerrewards,
'totalrewards':totalrewards,
'storerewards':fulldata,
}
#'count':Admins.objects.count(),}
return render_template(request, "customers_info.htm", content)
class ProductsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Products.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Products.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "products.htm", content)
class ProductViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productedit.htm", content)
class ProductRelatedClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productrelated.htm", content)
class ProductsImagesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "images_products.htm", content)
class ApanelViewOrdersClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
order_status = request.GET['order_status']
        if not order_status or int(order_status) < 1:
            order_status = 1
        else:
            order_status = int(order_status)
count = Orders.objects.filter(order_status=order_status).count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Orders.objects.all().filter(order_status=order_status)[offset-100:offset]
order_status_links = OrderStatus.objects.all().filter(visible='1')
#crm_messages=CrmMessages.objects.select_related(crmid__orderid='8623')
#return HttpResponse(crm_messages)
content = {'page_title': "Orders",
'allitems':allitems,
'count':count,
'page_num':page_num,
'order_status':order_status,
'order_links':order_status_links,}
return render_template(request, "vieworders.htm", content)
class ApanelViewOrdersStatusClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = OrderStatus.objects.all()
content = {'page_title': "Orders Status",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "orders_status.htm", content)
class OrderPageClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
oid = request.GET['oid']
order_status_links = OrderStatus.objects.all().filter(visible='1')
allitems = Orders.objects.get(orderid=oid)
try:
transactions = Transactions.objects.get(orderid=oid)
amount = transactions.amount
totalamt = Oitems.objects.filter(orderid=oid).aggregate(Sum('unitprice'))
            totalamt = totalamt['unitprice__sum'] or 0
except Exception as e:
transactions = ""
totalamt = 0
amount = 0
alloiitems = Oitems.objects.all().filter(orderid=oid)
finaltotal = (totalamt + int(allitems.oshipcost)) - allitems.coupondiscount
balance = finaltotal - amount
content = {'page_title': "Orders Status",
'allitems':allitems,
'alloiitems':alloiitems,
'order_links':order_status_links,
'totalamt':totalamt,
'finaltotal':finaltotal,
'paidamt':finaltotal,
'transactions':transactions,
'balance':balance,
}
return render_template(request, "orderpage.htm", content)
class AddAdminsFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = Admins.objects.all()
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ""
if "id" in request.GET:
allitems = Admins.objects.get(id=request.GET['id'])
else:
allitems = ""
content = {'page_title': "Add User",
'allitems':allitems,
'mode':mode,}
return render_template(request, "admins_add.htm", content)
class RmaPagesClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Rma.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Rma.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "rma_pages.htm", content)
class RmaViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
rmaid=request.GET['rmaid']
allitems = Rma.objects.get(idrma=rmaid)
content = {'page_title': "View RMA",
'allitems':allitems,}
return render_template(request, "rmaview.htm", content)
class ShippingManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ShippingCategory.objects.all()
content = {'page_title': "Admin: Shipping Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "adminshippingmanager.htm", content)
class TaxManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = Tax.objects.all()
content = {'page_title': "Admin: Tax Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "taxmanager.htm", content)
class GiftCertificatesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = GiftCertificates.objects.all().count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = GiftCertificates.objects.all()[offset-100:offset]
content = {'page_title': "Admin: Gift Certificate View",
'allitems':allitems,
'page_num':page_num,
'count':count,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "giftcertificate_pages.htm", content)
class EditGiftCertificateClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
giftid=request.GET['id']
allitems = GiftCertificates.objects.get(id=giftid)
total = allitems.certificate_amount + allitems.certificate_expenses
content = {'page_title': "Admin :: Edit Gift Certificate",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),
'total':total}
return render_template(request, "edit_giftcertificate.htm", content)
class ProductArticleViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductArticle.objects.all().filter(catalogid=pid)
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_articles.htm", content)
class ProductArticleEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['id']
allpages = ProductArticle.objects.get(id=pid)
content = {'page_title': "Admin :: Edit Article",
'allpages':allpages,}
return render_template(request, "product_article_edit.htm", content)
class ProductArticleAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
content = {'page_title': "Admin :: Add Article",
'pid':pid,}
return render_template(request, "product_article_add.htm", content)
class ProductReviewsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductReview.objects.filter(catalogid=pid).all()
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_reviews.htm", content)
class ProductOptionEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allpages = Products.objects.get(catalogid=pid)
content = {'page_title': "Admin :: Edit Options",
'allpages':allpages,
'prod':pid,}
return render_template(request, "product_options_edit.htm", content)
class BannersViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
allpages = SiteBanners.objects.all()
content = {'page_title': "Admin :: Banner Managements",
'allitems':allpages,}
return render_template(request, "viewbanners.htm", content)
class BannerEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
bid = request.GET['bid']
filename = "/gs/swf_product_images/banner/banner5.png"
allpages = SiteBanners.objects.get(id=bid)
content = {'page_title': "Admin :: Edit banner",
'allpages':allpages,
'bannerpath':filename,}
return render_template(request, "editbanner.htm", content)
class BannersAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
return render_template(request, "addbanner.htm", content)
class GCSfilesClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
file_list = files.listdir('/gs/swf_product_images')
        # TemplateView has no self.response attribute; collect the listing in an
        # HttpResponse (assumed to be imported at the top of this module, as in the
        # commented-out HttpResponse use above) and return it.
        response = HttpResponse()
        for file_name in file_list:
            if '$folder$' not in file_name:
                response.write('<a href="https://storage.cloud.google.com/%s">%s</a><br>' % (file_name[4:], file_name[4:]))
        return response
        #return render_template(request, "gcsfiles.htm", content)
class CouponsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Promotions.objects.count()
if "page" in request.GET and request.GET['page'] != "":
page_num = request.GET['page']
else:
page_num = 1
#pages = count/100
page_num = int(page_num)
offset = page_num * 100
allitems = Promotions.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "viewcoupons.htm", content)
| bsd-3-clause | -3,690,833,010,790,051,300 | 41.326347 | 266 | 0.592594 | false |
wingtk/icbuild | icbuild/modtypes/msvc.py | 1 | 3122 | # icbuild - a tool to ease building collections of source packages
# Copyright (C) 2015 Ignacio Casal Quinteiro
#
# msvc.py: msvc module type definitions.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__metaclass__ = type
import os
from icbuild.errors import BuildStateError, CommandError, FatalError
from icbuild.modtypes import \
Package, DownloadableModule, register_module_type, MakeModule
__all__ = [ 'MSVCModule' ]
class MSVCModule(Package, DownloadableModule):
"""Base type for modules that use MSBuild build system."""
type = 'msvc'
PHASE_CHECKOUT = DownloadableModule.PHASE_CHECKOUT
PHASE_FORCE_CHECKOUT = DownloadableModule.PHASE_FORCE_CHECKOUT
PHASE_BUILD = 'build'
PHASE_INSTALL = 'install'
def __init__(self, name, branch=None,
solution='', msvcargs=''):
Package.__init__(self, name, branch=branch)
self.solution = solution
self.msvcargs = msvcargs
def get_srcdir(self, buildscript):
return self.branch.srcdir
def get_builddir(self, buildscript):
return self.get_srcdir(buildscript)
def do_build(self, buildscript):
buildscript.set_action('Building', self)
srcdir = self.get_srcdir(buildscript)
msbuild = buildscript.config.msbuild
        cmd = [ msbuild, self.solution, self.msvcargs ]
buildscript.execute(cmd, cwd = srcdir)
do_build.depends = [PHASE_CHECKOUT]
do_build.error_phases = [PHASE_FORCE_CHECKOUT]
def do_install(self, buildscript):
buildscript.set_action('Installing', self)
# do nothing for now
do_install.depends = [PHASE_BUILD]
def xml_tag_and_attrs(self):
return 'msvc', [('id', 'name', None)]
def collect_args(instance, node, argtype):
if node.hasAttribute(argtype):
args = node.getAttribute(argtype)
else:
args = ''
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.nodeName == argtype:
if not child.hasAttribute('value'):
raise FatalError("<%s/> tag must contain value=''" % argtype)
args += ' ' + child.getAttribute('value')
return instance.eval_args(args)
def parse_msvc(node, config, uri, repositories, default_repo):
instance = MSVCModule.parse_from_xml(node, config, uri, repositories, default_repo)
instance.msvcargs = collect_args(instance, node, 'msvcargs')
return instance
register_module_type('msvc', parse_msvc)
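# Example sketch of a moduleset entry this parser is meant to consume. The
# "solution" attribute and <msvcargs> element names are assumptions based on
# __init__() and collect_args() above; the id, solution path and repo are
# made up.
#
#   <msvc id="glib" solution="win32/vs12/glib.sln">
#     <msvcargs value="/p:Configuration=Release"/>
#     <branch repo="git.gnome.org" module="glib"/>
#   </msvc>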
| gpl-2.0 | 1,607,374,235,951,469,600 | 34.078652 | 87 | 0.68802 | false |
MarinusVL/scRNApipe | setup.py | 1 | 1056 | import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='scRNApipe',
version='0.1.0',
description='Package for analysing scRNA-seq in Transcript Tag Counting data.',
long_description=read('README.md'),
author='Stavros Giannoukakos',
author_email='[email protected]',
packages=['scRNApipe'],
url=['https://github.com/MarinusVL/scRNApipe'],
keywords=['single cell RNA analysis'],
    install_requires=['pysam>=0.8.3', 'numpy', 'multiqc', 'STAR', 'umis', 'umi_tools', 'python>=2.5,<3', 'natsort'],
dependency_links=['https://sourceforge.net/projects/subread/files/subread-1.5.2/subread-1.5.2-source.tar.gz/download',
'https://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.5_source.zip'
],
package_data = {
'': ['configuration_file.txt']
},
entry_points={
'console_scripts': ['scRNApipe = scRNApipe.scRNApipe:main']
},
)
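# Example sketch: install from a source checkout and invoke the console script
# declared above. Passing the bundled configuration_file.txt on the command
# line is an assumption about the CLI, not something setup.py defines.
#
#   pip install .
#   scRNApipe configuration_file.txt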
| mit | 4,567,553,069,334,808,000 | 35.413793 | 122 | 0.631629 | false |
catalpainternational/OIPA | OIPA/api/v2/resources/advanced_resources.py | 1 | 7277 | from builtins import object
from tastypie.resources import ModelResource
from geodata.models import Country, Region, City
from indicator.models import Indicator
from tastypie import fields
from tastypie.serializers import Serializer
class IndicatorFiltersResource(ModelResource):
name = fields.CharField(attribute='name')
class Meta:
queryset = Indicator.objects.all()
resource_name = 'indicator-filters'
serializer = Serializer(formats=['xml', 'json'])
excludes = ['description', 'type_data', 'selection_type', 'deprivation_type', 'rain_programme']
include_resource_uri = False
# def dehydrate(self, bundle):
# bundle.data['region_id'] = bundle.obj.country.region_id
#
#
    def dehydrate_name(self, bundle):
return bundle.data['name']
class OnlyCountryResource(ModelResource):
class Meta:
queryset = Country.objects.all().order_by('name')
include_resource_uri = False
excludes = ['center_longlat', 'dac_country_code', 'dac_region_code', 'dac_region_name', 'iso3', 'language', 'polygon']
resource_name = 'country'
limit = 1000
class OnlyRegionResource(ModelResource):
class Meta:
queryset = Region.objects.all().distinct().order_by('code')
resource_name = 'region'
include_resource_uri = False
class OnlyCityResource(ModelResource):
class Meta:
queryset = City.objects.all().order_by('name')
resource_name = 'city'
include_resource_uri = False
excludes = ['alt_name', 'ascii_name', 'geoname_id', 'location']
def dehydrate(self, bundle):
bundle.data['country'] = bundle.obj.country.code
return bundle
def apply_filters(self, request, applicable_filters):
base_object_list = super(OnlyCityResource, self).apply_filters(request, applicable_filters)
countries = request.GET.get('country', None)
filters = {}
if countries:
countries = countries.replace('|', ',').replace('-', ',').split(',')
filters.update(dict(country__iso__in=countries))
return base_object_list.filter(**filters).distinct()
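# Example sketch of a request this resource serves; the URL prefix depends on
# how the API urls are wired elsewhere, and the ISO codes are illustrative:
#
#   GET /api/v2/city/?country=KE|UG
#
# The 'country' parameter accepts ISO codes separated by ',', '|' or '-', as
# handled in apply_filters() above.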
#
# class UnHabitatIndicatorCountryResource(ModelResource):
# class Meta:
# queryset = UnHabitatIndicatorCountry.objects.all()
# include_resource_uri = False
# resource_name = 'indicator-country'
# serializer = Serializer(formats=['xml', 'json'])
# filtering = {"year": ALL }
# # authentication = ApiKeyAuthentication()
#
#
# def dehydrate(self, bundle):
# bundle.data['country_iso'] = bundle.obj.country.iso
# bundle.data['country_iso3'] = bundle.obj.country.iso3
#
# bundle.data['country_name'] = bundle.obj.country.get_iso_display()
# bundle.data['dac_region_code'] = bundle.obj.country.dac_region_code
# bundle.data['dac_region_name'] = bundle.obj.country.dac_region_name
# tpset = bundle.obj.typedeprivationcountry_set.all()
# tp_list = {}
# for tp in tpset:
# temp_list = {}
# temp_list['type'] = tp.get_type_deprivation_display()
# temp_list['non_slum_household'] = tp.non_slum_household
# temp_list['slum_household'] = tp.slum_household
# temp_list['one_shelter_deprivation'] = tp.one_shelter_deprivation
# temp_list['two_shelter_deprivations'] = tp.two_shelter_deprivations
# temp_list['three_shelter_deprivations'] = tp.three_shelter_deprivations
# temp_list['four_shelter_deprivations'] = tp.four_shelter_deprivations
# temp_list['gender'] = tp.gender
# temp_list['extra_type_name'] = tp.extra_type_name
# temp_list['is_matrix'] = tp.is_matrix
# temp_list['urban'] = tp.urban
# temp_list['total'] = tp.total
# temp_list['rural'] = tp.rural
#
# tp_list['deprivation_id_'+str(tp.id)] = temp_list
# bundle.data['deprivation'] = tp_list
# bundle.data.pop('id')
#
# return bundle
#
# def apply_filters(self, request, applicable_filters):
# base_object_list = super(UnHabitatIndicatorCountryResource, self).apply_filters(request, applicable_filters)
# regions = request.GET.get('regions', None)
# countries = request.GET.get('country_name', None)
# isos = request.GET.get('iso', None)
# indicator = request.GET.get('indicator', None)
#
#
#
# filters = {}
# if regions:
# # @todo: implement smart filtering with seperator detection
# regions = regions.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(country__dac_region_code__in=regions))
# if countries:
# countries = countries.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(country__country_name__in=countries))
# if isos:
# isos = isos.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(country__iso__in=isos))
# #
#
# return base_object_list.filter(**filters).distinct()
#
#
# class UnHabitatIndicatorcityResource(ModelResource):
# class Meta:
# queryset = UnHabitatIndicatorcity.objects.all()
# include_resource_uri = False
# resource_name = 'indicator-city'
# serializer = Serializer(formats=['xml', 'json'])
# filtering = {"year": ALL }
# # authentication = ApiKeyAuthentication()
#
#
# def dehydrate(self, bundle):
# bundle.data['country_iso'] = bundle.obj.city.country.iso
# bundle.data['country_name'] = bundle.obj.city.country.get_iso_display()
# bundle.data['dac_region_code'] = bundle.obj.city.country.dac_region_code
# bundle.data['dac_region_name'] = bundle.obj.city.country.dac_region_name
# bundle.data['city_name'] = bundle.obj.city.name
#
# # bundle.data['']
#
# bundle.data.pop('id')
#
# return bundle
#
# def apply_filters(self, request, applicable_filters):
# base_object_list = super(UnHabitatIndicatorcityResource, self).apply_filters(request, applicable_filters)
# regions = request.GET.get('regions', None)
# countries = request.GET.get('country_name', None)
# isos = request.GET.get('iso', None)
# city = request.GET.get('city', None)
#
#
#
# filters = {}
# if regions:
# # @todo: implement smart filtering with seperator detection
# regions = regions.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(city__country__dac_region_code__in=regions))
# if countries:
# countries = countries.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(city__country__country_name__in=countries))
# if isos:
# isos = isos.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(city__country__iso__in=isos))
# if city:
# city = city.replace('|', ',').replace('-', ',').split(',')
#
# filters.update(dict(city__name__in=city))
#
# return base_object_list.filter(**filters).distinct()
#
#
#
| agpl-3.0 | 8,310,001,698,274,749,000 | 36.127551 | 126 | 0.600797 | false |
zaitcev/swift-lfs | test/probe/common.py | 1 | 7421 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from httplib import HTTPConnection
from os import kill, path
from signal import SIGTERM
from subprocess import Popen, PIPE
from time import sleep, time
from swiftclient import get_auth, head_account
from swift.common.ring import Ring
from test.probe import CHECK_SERVER_TIMEOUT
def start_server(port, port2server, pids, check=True):
server = port2server[port]
if server[:-1] in ('account', 'container', 'object'):
if not path.exists('/etc/swift/%s-server/%s.conf' %
(server[:-1], server[-1])):
return None
pids[server] = Popen([
'swift-%s-server' % server[:-1],
'/etc/swift/%s-server/%s.conf' % (server[:-1], server[-1])]).pid
if check:
return check_server(port, port2server, pids)
else:
pids[server] = Popen(['swift-%s-server' % server,
'/etc/swift/%s-server.conf' % server]).pid
if check:
return check_server(port, port2server, pids)
return None
def check_server(port, port2server, pids, timeout=CHECK_SERVER_TIMEOUT):
server = port2server[port]
if server[:-1] in ('account', 'container', 'object'):
if int(server[-1]) > 4:
return None
path = '/connect/1/2'
if server[:-1] == 'container':
path += '/3'
elif server[:-1] == 'object':
path += '/3/4'
try_until = time() + timeout
while True:
try:
conn = HTTPConnection('127.0.0.1', port)
conn.request('GET', path)
resp = conn.getresponse()
# 404 because it's a nonsense path (and mount_check is false)
# 507 in case the test target is a VM using mount_check
if resp.status not in (404, 507):
raise Exception(
'Unexpected status %s' % resp.status)
break
except Exception, err:
if time() > try_until:
print err
print 'Giving up on %s:%s after %s seconds.' % (
server, port, timeout)
raise err
sleep(0.1)
else:
try_until = time() + timeout
while True:
try:
url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
'test:tester', 'testing')
account = url.split('/')[-1]
head_account(url, token)
return url, token, account
except Exception, err:
if time() > try_until:
print err
print 'Giving up on proxy:8080 after 30 seconds.'
raise err
sleep(0.1)
return None
def kill_server(port, port2server, pids):
try:
kill(pids[port2server[port]], SIGTERM)
except Exception, err:
print err
try_until = time() + 30
while True:
try:
conn = HTTPConnection('127.0.0.1', port)
conn.request('GET', '/')
conn.getresponse()
except Exception, err:
break
if time() > try_until:
raise Exception(
'Still answering on port %s after 30 seconds' % port)
sleep(0.1)
def kill_servers(port2server, pids):
for port in port2server:
kill_server(port, port2server, pids)
def kill_nonprimary_server(primary_nodes, port2server, pids):
primary_ports = [n['port'] for n in primary_nodes]
for port, server in port2server.iteritems():
if port in primary_ports:
server_type = server[:-1]
break
else:
raise Exception('Cannot figure out server type for %r' % primary_nodes)
for port, server in list(port2server.iteritems()):
if server[:-1] == server_type and port not in primary_ports:
kill_server(port, port2server, pids)
return port
def reset_environment():
p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
print stdout
pids = {}
try:
port2server = {}
config_dict = {}
for server, port in [('account', 6002), ('container', 6001),
('object', 6000)]:
for number in xrange(1, 9):
port2server[port + (number * 10)] = '%s%d' % (server, number)
for port in port2server:
start_server(port, port2server, pids, check=False)
for port in port2server:
check_server(port, port2server, pids)
port2server[8080] = 'proxy'
url, token, account = start_server(8080, port2server, pids)
account_ring = Ring('/etc/swift/account.ring.gz')
container_ring = Ring('/etc/swift/container.ring.gz')
object_ring = Ring('/etc/swift/object.ring.gz')
for name in ('account', 'container', 'object'):
for server in (name, '%s-replicator' % name):
config_dict[server] = '/etc/swift/%s-server/%%d.conf' % name
except BaseException:
try:
raise
finally:
try:
kill_servers(port2server, pids)
except Exception:
pass
return pids, port2server, account_ring, container_ring, object_ring, url, \
token, account, config_dict
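# Example sketch of how a probe test typically drives these helpers (names as
# defined above):
#
#   pids, port2server, account_ring, container_ring, object_ring, url, \
#       token, account, config_dict = reset_environment()
#   try:
#       ...  # exercise the cluster, e.g. kill_nonprimary_server(...)
#       get_to_final_state()
#   finally:
#       kill_servers(port2server, pids)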
def get_to_final_state():
processes = []
for job in ('account-replicator', 'container-replicator',
'object-replicator'):
for number in xrange(1, 9):
if not path.exists('/etc/swift/%s-server/%d.conf' %
(job.split('-')[0], number)):
continue
processes.append(Popen([
'swift-%s' % job,
'/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
'once']))
for process in processes:
process.wait()
processes = []
for job in ('container-updater', 'object-updater'):
for number in xrange(1, 5):
processes.append(Popen([
'swift-%s' % job,
'/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
'once']))
for process in processes:
process.wait()
processes = []
for job in ('account-replicator', 'container-replicator',
'object-replicator'):
for number in xrange(1, 9):
if not path.exists('/etc/swift/%s-server/%d.conf' %
(job.split('-')[0], number)):
continue
processes.append(Popen([
'swift-%s' % job,
'/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
'once']))
for process in processes:
process.wait()
| apache-2.0 | 5,316,278,556,826,849,000 | 35.377451 | 79 | 0.538741 | false |
gitcoinco/web | app/grants/tests/test_views.py | 1 | 1226 | import json
from django.urls import reverse
from grants.models import Grant, GrantCategory, GrantType
from grants.views import basic_grant_categories
from test_plus.test import TestCase
class GrantsViewResponsesTests(TestCase):
# def test_not_authorized(self):
# response = self.client.post(reverse('grants:new_matching_partner'))
# expected_response = {'status': 200}
# self.assertEqual(response.status_code, expected_response['status'])
def test_fetching_grant_categories_from_api(self):
'''
response = self.client.get(reverse('grants:grant_categories'))
if GrantCategory.objects.all().count == 0:
return
result = []
for category in GrantCategory.objects.all():
result.append(category.category)
result = list(set(result))
categories = [ (category,idx) for idx, category in enumerate(result) ]
expected_response = {
'categories': categories,
'status': 200
}
self.assertEqual(response.status_code, expected_response['status'])
self.assertEqual(json.loads(response.content)['categories'], expected_response['categories'])
'''
pass
| agpl-3.0 | -8,922,970,137,816,865,000 | 29.65 | 101 | 0.650897 | false |
dhylands/bioloid3 | bioloid/bus.py | 1 | 9709 | """This module provides the Bus class which knows how to talk to Bioloid
devices, and the BusError exception which is raised when an error is
enountered.
"""
import pyb
from bioloid import packet
from bioloid.dump_mem import dump_mem
from bioloid.log import log
class BusError(Exception):
"""Exception which is raised when a non-successful status packet is received."""
def __init__(self, error_code, *args, **kwargs):
super(BusError, self).__init__(self, *args, **kwargs)
self.error_code = error_code
def get_error_code(self):
"""Retrieves the error code associated with the exception."""
return self.error_code
def __str__(self):
return "Rcvd Status: " + str(packet.ErrorCode(self.error_code))
class Bus:
"""The Bus class knows the commands used to talk to bioloid devices."""
SHOW_NONE = 0
SHOW_COMMANDS = (1 << 0)
SHOW_PACKETS = (1 << 1)
def __init__(self, serial_port, show=SHOW_NONE):
self.serial_port = serial_port
self.show = show
def action(self):
"""Broadcasts an action packet to all of the devices on the bus.
This causes all of the devices to perform their deferred writes
at the same time.
"""
if self.show & Bus.SHOW_COMMANDS:
log('Broadcasting ACTION')
self.fill_and_write_packet(packet.Id.BROADCAST, packet.Command.ACTION)
def fill_and_write_packet(self, dev_id, cmd, data=None):
"""Allocates and fills a packet. data should be a bytearray of data
to include in the packet, or None if no data should be included.
"""
packet_len = 6
if data is not None:
packet_len += len(data)
pkt_bytes = bytearray(packet_len)
pkt_bytes[0] = 0xff
pkt_bytes[1] = 0xff
pkt_bytes[2] = dev_id
pkt_bytes[3] = 2 # for len and cmd
pkt_bytes[4] = cmd
if data is not None:
pkt_bytes[3] += len(data)
pkt_bytes[5:packet_len - 1] = data
pkt_bytes[-1] = ~sum(pkt_bytes[2:-1]) & 0xff
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt_bytes, prefix=' W', show_ascii=True, log=log)
self.serial_port.write_packet(pkt_bytes)
def ping(self, dev_id):
"""Sends a PING request to a device.
Returns true if the device responds successfully, false if a timeout
        occurs, and raises a BusError for any other failures.
"""
self.send_ping(dev_id)
try:
self.read_status_packet()
except BusError as ex:
if ex.get_error_code() == packet.ErrorCode.TIMEOUT:
return False
raise ex
return True
def read(self, dev_id, offset, num_bytes):
"""Sends a READ request and returns data read.
        Raises a BusError if any errors occur.
"""
self.send_read(dev_id, offset, num_bytes)
pkt = self.read_status_packet()
return pkt.params()
def read_status_packet(self):
"""Reads a status packet and returns it.
Rasises a bioloid.bus.BusError if an error occurs.
"""
pkt = packet.Packet(status_packet=True)
while True:
# start = pyb.micros()
byte = self.serial_port.read_byte()
if byte is None:
if self.show & Bus.SHOW_COMMANDS:
log('TIMEOUT')
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt.pkt_bytes, prefix=' R', show_ascii=True, log=log)
raise BusError(packet.ErrorCode.TIMEOUT)
err = pkt.process_byte(byte)
if err != packet.ErrorCode.NOT_DONE:
break
if err != packet.ErrorCode.NONE:
err_ex = BusError(err)
if self.show & Bus.SHOW_COMMANDS:
log(err_ex)
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt.pkt_bytes, prefix=' R', show_ascii=True, log=log)
raise err_ex
err = pkt.error_code()
if self.show & Bus.SHOW_COMMANDS:
log('Rcvd Status: {} from ID: {}'.format(packet.ErrorCode(err), pkt.dev_id))
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt.pkt_bytes, prefix=' R', show_ascii=True, log=log)
if err != packet.ErrorCode.NONE:
raise BusError(err)
return pkt
def reset(self, dev_id):
"""Sends a RESET request.
        Raises a BusError if any errors occur.
"""
self.send_reset(dev_id)
if dev_id == packet.Id.BROADCAST:
return packet.ErrorCode.NONE
pkt = self.read_status_packet()
return pkt.error_code()
def scan(self, start_id=0, num_ids=32, dev_found=None, dev_missing=None):
"""Scans the bus, calling devFound(self, dev) for each device
which responds, and dev_missing(self, dev) for each device
which doesn't.
Returns true if any devices were found.
"""
end_id = start_id + num_ids - 1
if end_id >= packet.Id.BROADCAST:
end_id = packet.Id.BROADCAST - 1
some_dev_found = False
for dev_id in range(start_id, end_id + 1):
if self.ping(dev_id):
some_dev_found = True
if dev_found:
dev_found(self, dev_id)
else:
if dev_missing:
dev_missing(self, dev_id)
return some_dev_found
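    # Example sketch: scan the first 18 IDs and log what answers, assuming
    # 'serial_port' is the same UART wrapper this Bus was constructed with.
    #
    #   bus = Bus(serial_port, show=Bus.SHOW_COMMANDS)
    #   bus.scan(start_id=0, num_ids=18,
    #            dev_found=lambda bus, dev_id: log('found ID %d' % dev_id))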
def send_ping(self, dev_id):
"""Sends a ping to a device."""
if self.show & Bus.SHOW_COMMANDS:
log('Sending PING to ID {}'.format(dev_id))
self.fill_and_write_packet(dev_id, packet.Command.PING)
def send_read(self, dev_id, offset, num_bytes):
"""Sends a READ request to read data from the device's control
table.
"""
if self.show & Bus.SHOW_COMMANDS:
log('Sending READ to ID {} offset 0x{:02x} len {}'.format(
dev_id, offset, num_bytes))
self.fill_and_write_packet(dev_id, packet.Command.READ, bytearray((offset, num_bytes)))
def send_reset(self, dev_id):
"""Sends a RESET command to the device, which causes it to reset the
control table to factory defaults.
"""
if self.show & Bus.SHOW_COMMANDS:
if dev_id == packet.Id.BROADCAST:
log('Broadcasting RESET')
else:
log('Sending RESET to ID {}'.format(dev_id))
self.fill_and_write_packet(dev_id, packet.Command.RESET)
def send_write(self, dev_id, offset, data, deferred=False):
"""Sends a WRITE request if deferred is False, or REG_WRITE
request if deferred is True to write data into the device's
control table.
data should be an array of ints, or a bytearray.
        Deferred writes will occur when an ACTION command is broadcast.
"""
if self.show & Bus.SHOW_COMMANDS:
cmd_str = 'REG_WRITE' if deferred else 'WRITE'
if dev_id == packet.Id.BROADCAST:
log('Broadcasting {} offset 0x{:02x} len {}'.format(cmd_str, offset, len(data)))
else:
log('Sending {} to ID {} offset 0x{:02x} len {}'.format(cmd_str, dev_id, offset, len(data)))
cmd = packet.Command.REG_WRITE if deferred else packet.Command.WRITE
pkt_data = bytearray(len(data))
pkt_data[0] = offset
pkt_data[1:] = data
self.fill_and_write_packet(dev_id, cmd, pkt_data)
    def sync_write(self, dev_ids, offset, values):
        """Sets up a synchronous write command.
dev_ids should be an array of device ids.
offset should be the offset that the data will be written to.
values should be an array of bytearrays. There should be one bytearray
for each dev_id, and each bytearray should be of the same length.
raises ValueError if the dimensionality of values is incorrect.
"""
if self.show & Bus.SHOW_COMMANDS:
ids = ', '.join(['{}'.format(id) for id in dev_ids])
log('Sending SYNC_WRITE to IDs {} offset 0x{:02x} len {}'.format(ids, offset, len(values[0])))
num_ids = len(dev_ids)
if num_ids != len(values):
raise ValueError('len(dev_ids) = {} must match len(values) = {}'.format(num_ids, len(values)))
bytes_per_id = len(values[0])
param_len = num_ids * (bytes_per_id + 1) + 2
data = bytearray(param_len)
data[0] = offset
data[1] = bytes_per_id
data_idx = 2
for id_idx in range(num_ids):
if len(values[id_idx]) != bytes_per_id:
raise ValueError('len(values[{}]) not equal {}'.format(id_idx, bytes_per_id))
data[data_idx] = dev_ids[id_idx]
data_idx += 1
data[data_idx:data_idx + bytes_per_id] = values[id_idx]
data_idx += bytes_per_id
self.fill_and_write_packet(packet.Id.BROADCAST, packet.Command.SYNC_WRITE, data)
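    # Example sketch: one SYNC_WRITE carrying a 2-byte value to three devices.
    # The offset 0x1e (goal position on many Dynamixel-style servos) is an
    # assumption, not something this module defines.
    #
    #   bus.sync_write([1, 2, 3], 0x1e,
    #                  [bytearray((0x00, 0x02)),
    #                   bytearray((0x55, 0x01)),
    #                   bytearray((0xaa, 0x00))])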
def write(self, dev_id, offset, data, deferred=False):
"""Sends a WRITE request if deferred is False, or a REG_WRITE
request if deferred is True. Deferred writes will occur when
        an ACTION command is broadcast.
data should be an array of ints, or a bytearray.
        Raises a BusError if any errors occur.
"""
self.send_write(dev_id, offset, data, deferred)
if dev_id == packet.Id.BROADCAST:
return packet.ErrorCode.NONE
pkt = self.read_status_packet()
return pkt.error_code()
| mit | 4,271,133,406,594,132,500 | 36.77821 | 108 | 0.576475 | false |
takearest118/coconut | models/notification.py | 1 | 1240 | # -*- coding: utf-8 -*-
from bson import ObjectId
from enum import Enum
from models.base import BaseModel
class NotificationModel(BaseModel):
MONGO_COLLECTION = 'notification'
def __init__(self, *args, **kwargs):
super(NotificationModel, self).__init__(*args, **kwargs)
@property
def specification(self):
specification = super(NotificationModel, self).specification
specification.extend([
{
'key': 'admin_oid',
'type': ObjectId,
'default': None
},
{
'key': 'message',
'type': str,
'default': None
},
{
'key': 'type',
'type': str,
'default': None
},
{
'key': 'read',
'type': bool,
'default': (lambda: False)
},
{
'key': 'enabled',
'type': bool,
'default': (lambda: True)
},
{
'key': 'data',
'type': dict,
'default': None
}
])
return specification
| gpl-3.0 | -7,735,690,550,109,456,000 | 23.8 | 68 | 0.400806 | false |
ocefpaf/ulmo | ulmo/cuahsi/wof/core.py | 1 | 11413 | """
ulmo.wof.core
~~~~~~~~~~~~~
This module provides direct access to `CUAHSI WaterOneFlow`_ web services.
.. _CUAHSI WaterOneFlow: http://his.cuahsi.org/wofws.html
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
import io
import suds.client
import isodate
from ulmo import util
from ulmo import waterml
_suds_client = None
def get_sites(wsdl_url, suds_cache=("default",)):
"""
Retrieves information on the sites that are available from a WaterOneFlow
service using a GetSites request. For more detailed information including
which variables and time periods are available for a given site, use
``get_site_info()``.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
sites_dict : dict
a python dict with site codes mapped to site information
"""
suds_client = _get_client(wsdl_url, suds_cache)
waterml_version = _waterml_version(suds_client)
if waterml_version == '1.0':
response = suds_client.service.GetSitesXml('')
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_0.parse_site_infos(response_buffer)
elif waterml_version == '1.1':
response = suds_client.service.GetSites('')
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_1.parse_site_infos(response_buffer)
return dict([
(site['network'] + ':' + site['code'], site)
for site in list(sites.values())
])
def get_site_info(wsdl_url, site_code, suds_cache=("default",)):
"""
Retrieves detailed site information from a WaterOneFlow service using a
GetSiteInfo request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get more information for. Site codes
MUST contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
site_info : dict
a python dict containing site information
"""
suds_client = _get_client(wsdl_url, suds_cache)
waterml_version = _waterml_version(suds_client)
if waterml_version == '1.0':
response = suds_client.service.GetSiteInfo(site_code)
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_0.parse_sites(response_buffer)
elif waterml_version == '1.1':
response = suds_client.service.GetSiteInfo(site_code)
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_1.parse_sites(response_buffer)
if len(sites) == 0:
return {}
site_info = list(sites.values())[0]
series_dict = dict([
(series['variable']['vocabulary'] + ':' + series['variable']['code'],
series)
for series in site_info['series']
])
site_info['series'] = series_dict
return site_info
def get_values(wsdl_url, site_code, variable_code, start=None, end=None, suds_cache=("default",)):
"""
Retrieves site values from a WaterOneFlow service using a GetValues request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get values for. Site codes MUST
contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
variable_code : str
Variable code of the variable you'd like to get values for. Variable
codes MUST contain the network and be of the form
<vocabulary>:<variable_code>, as is required by WaterOneFlow.
start : ``None`` or datetime (see :ref:`dates-and-times`)
Start of a date range for a query. If both start and end parameters are
omitted, the entire time series available will be returned.
end : ``None`` or datetime (see :ref:`dates-and-times`)
End of a date range for a query. If both start and end parameters are
omitted, the entire time series available will be returned.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
site_values : dict
a python dict containing values
"""
suds_client = _get_client(wsdl_url, suds_cache)
# Note from Emilio:
# Not clear if WOF servers really do handle time zones (time offsets or
# "Z" in the iso8601 datetime strings. In the past, I (Emilio) have
# passed naive strings to GetValues(). if a datetime object is passed to
# this ulmo function, the isodate code above will include it in the
# resulting iso8601 string; if not, no. Test effect of dt_isostr having
# a timezone code or offset, vs not having it (the latter, naive dt
# strings, is what I've been using all along)
# the interpretation of start and end time zone is server-dependent
start_dt_isostr = None
end_dt_isostr = None
if start is not None:
start_datetime = util.convert_datetime(start)
start_dt_isostr = isodate.datetime_isoformat(start_datetime)
if end is not None:
end_datetime = util.convert_datetime(end)
end_dt_isostr = isodate.datetime_isoformat(end_datetime)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetValues(
site_code, variable_code, startDate=start_dt_isostr,
endDate=end_dt_isostr)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == '1.0':
values = waterml.v1_0.parse_site_values(response_buffer)
elif waterml_version == '1.1':
values = waterml.v1_1.parse_site_values(response_buffer)
    if variable_code is not None:
return list(values.values())[0]
else:
return values
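# Example sketch: pull a month of values. The WSDL URL, site code and variable
# code below are placeholders; substitute ones published by a real
# WaterOneFlow service.
#
#   values = get_values(
#       'http://example.org/cuahsi_1_1.asmx?WSDL',
#       'NWISDV:09380000', 'NWISDV:00060',
#       start='2000-01-01', end='2000-01-31')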
def get_variable_info(wsdl_url, variable_code=None, suds_cache=("default",)):
"""
Retrieves site values from a WaterOneFlow service using a GetVariableInfo
request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
variable_code : `None` or str
If `None` (default) then information on all variables will be returned,
otherwise, this should be set to the variable code of the variable you'd
like to get more information on. Variable codes MUST contain the
network and be of the form <vocabulary>:<variable_code>, as is required
by WaterOneFlow.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
variable_info : dict
a python dict containing variable information. If no variable code is
`None` (default) then this will be a nested set of dicts keyed by
<vocabulary>:<variable_code>
"""
suds_client = _get_client(wsdl_url, suds_cache)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetVariableInfo(variable_code)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == '1.0':
variable_info = waterml.v1_0.parse_variables(response_buffer)
elif waterml_version == '1.1':
variable_info = waterml.v1_1.parse_variables(response_buffer)
    if variable_code is not None and len(variable_info) == 1:
return list(variable_info.values())[0]
else:
return dict([
('%s:%s' % (var['vocabulary'], var['code']), var)
for var in list(variable_info.values())
])
def _waterml_version(suds_client):
tns_str = str(suds_client.wsdl.tns[1])
if tns_str == 'http://www.cuahsi.org/his/1.0/ws/':
return '1.0'
elif tns_str == 'http://www.cuahsi.org/his/1.1/ws/':
return '1.1'
else:
raise NotImplementedError(
"only WaterOneFlow 1.0 and 1.1 are currently supported")
def _get_client(wsdl_url, cache_duration=("default",)):
"""
Open and re-use (persist) a suds.client.Client instance _suds_client throughout
the session, to minimize WOF server impact and improve performance. _suds_client
is global in scope.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
cache_duration: ``None`` or tuple
suds client local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the suds default (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
_suds_client : suds Client
Newly or previously instantiated (reused) suds Client object.
"""
global _suds_client
# Handle new or changed client request (create new client)
if _suds_client is None or _suds_client.wsdl.url != wsdl_url:
_suds_client = suds.client.Client(wsdl_url)
if cache_duration is None:
_suds_client.set_options(cache=None)
else:
cache = _suds_client.options.cache
# could add some error catching ...
if cache_duration[0] == "default":
cache.setduration(days=1)
else:
cache.setduration(**dict([cache_duration]))
return _suds_client
| bsd-3-clause | 6,928,622,012,715,005,000 | 37.819728 | 98 | 0.651625 | false |
ic-hep/DIRAC | ConfigurationSystem/Client/Helpers/Registry.py | 1 | 11267 | """ Helper for /Registry section
"""
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO
__RCSID__ = "$Id$"
# pylint: disable=missing-docstring
gBaseRegistrySection = "/Registry"
def getUsernameForDN( dn, usersList = False ):
if not usersList:
retVal = gConfig.getSections( "%s/Users" % gBaseRegistrySection )
if not retVal[ 'OK' ]:
return retVal
usersList = retVal[ 'Value' ]
for username in usersList:
if dn in gConfig.getValue( "%s/Users/%s/DN" % ( gBaseRegistrySection, username ), [] ):
return S_OK( username )
return S_ERROR( "No username found for dn %s" % dn )
def getDNForUsername( username ):
dnList = gConfig.getValue( "%s/Users/%s/DN" % ( gBaseRegistrySection, username ), [] )
if dnList:
return S_OK( dnList )
return S_ERROR( "No DN found for user %s" % username )
def getGroupsForDN( dn ):
retVal = getUsernameForDN( dn )
if not retVal[ 'OK' ]:
return retVal
return getGroupsForUser( retVal[ 'Value' ] )
def __getGroupsWithAttr( attrName, value ):
retVal = gConfig.getSections( "%s/Groups" % gBaseRegistrySection )
if not retVal[ 'OK' ]:
return retVal
groupsList = retVal[ 'Value' ]
groups = []
for group in groupsList:
if value in gConfig.getValue( "%s/Groups/%s/%s" % ( gBaseRegistrySection, group, attrName ), [] ):
groups.append( group )
if not groups:
return S_ERROR( "No groups found for %s=%s" % ( attrName,value ) )
groups.sort()
return S_OK( groups )
def getGroupsForUser( username ):
return __getGroupsWithAttr( 'Users', username )
def getGroupsForVO( vo ):
if getVO():
return gConfig.getSections( "%s/Groups" % gBaseRegistrySection )
return __getGroupsWithAttr( 'VO', vo )
def getGroupsWithProperty( propName ):
return __getGroupsWithAttr( "Properties", propName )
def getHostnameForDN( dn ):
retVal = gConfig.getSections( "%s/Hosts" % gBaseRegistrySection )
if not retVal[ 'OK' ]:
return retVal
hostList = retVal[ 'Value' ]
for hostname in hostList:
if dn in gConfig.getValue( "%s/Hosts/%s/DN" % ( gBaseRegistrySection, hostname ), [] ):
return S_OK( hostname )
return S_ERROR( "No hostname found for dn %s" % dn )
def getDefaultUserGroup():
return gConfig.getValue( "/%s/DefaultGroup" % gBaseRegistrySection, "user" )
def findDefaultGroupForDN( dn ):
result = getUsernameForDN( dn )
if not result[ 'OK' ]:
return result
return findDefaultGroupForUser( result[ 'Value' ] )
def findDefaultGroupForUser( userName ):
userDefGroups = getUserOption( userName, "DefaultGroup", [] )
defGroups = userDefGroups + gConfig.getValue( "%s/DefaultGroup" % gBaseRegistrySection, [ "user" ] )
result = getGroupsForUser( userName )
if not result[ 'OK' ]:
return result
userGroups = result[ 'Value' ]
for group in defGroups:
if group in userGroups:
return S_OK( group )
if userGroups:
return S_OK( userGroups[0] )
return S_ERROR( "User %s has no groups" % userName )
def getAllUsers():
retVal = gConfig.getSections( "%s/Users" % gBaseRegistrySection )
if not retVal[ 'OK' ]:
return []
return retVal[ 'Value' ]
def getAllGroups():
retVal = gConfig.getSections( "%s/Groups" % gBaseRegistrySection )
if not retVal[ 'OK' ]:
return []
return retVal[ 'Value' ]
def getUsersInGroup( groupName, defaultValue = None ):
if defaultValue is None:
defaultValue = []
option = "%s/Groups/%s/Users" % ( gBaseRegistrySection, groupName )
return gConfig.getValue( option, defaultValue )
def getUsersInVO( vo, defaultValue = None ):
if defaultValue is None:
defaultValue = []
result = getGroupsForVO( vo )
if not result['OK']:
return defaultValue
groups = result['Value']
if not groups:
return defaultValue
userList = []
for group in groups:
userList += getUsersInGroup( group )
return userList
def getDNsInVO( vo ):
DNs = []
for user in getUsersInVO( vo ):
result = getDNForUsername( user )
if result[ 'OK' ]:
DNs.extend( result[ 'Value' ] )
return DNs
def getDNsInGroup( groupName ):
DNs = []
for user in getUsersInGroup( groupName ):
result = getDNForUsername( user )
if result[ 'OK' ]:
DNs.extend( result[ 'Value' ] )
return DNs
def getPropertiesForGroup( groupName, defaultValue = None ):
if defaultValue is None:
defaultValue = []
option = "%s/Groups/%s/Properties" % ( gBaseRegistrySection, groupName )
return gConfig.getValue( option, defaultValue )
def getPropertiesForHost( hostName, defaultValue = None ):
if defaultValue is None:
defaultValue = []
option = "%s/Hosts/%s/Properties" % ( gBaseRegistrySection, hostName )
return gConfig.getValue( option, defaultValue )
def getPropertiesForEntity( group, name = "", dn = "", defaultValue = None ):
if defaultValue is None:
defaultValue = []
if group == 'hosts':
if not name:
result = getHostnameForDN( dn )
if not result[ 'OK' ]:
return defaultValue
name = result[ 'Value' ]
return getPropertiesForHost( name, defaultValue )
else:
return getPropertiesForGroup( group, defaultValue )
def __matchProps( sProps, rProps ):
foundProps = []
for prop in sProps:
if prop in rProps:
foundProps.append( prop )
return foundProps
def groupHasProperties( groupName, propList ):
if isinstance( propList, basestring ):
propList = [ propList ]
return __matchProps( propList, getPropertiesForGroup( groupName ) )
def hostHasProperties( hostName, propList ):
if isinstance( propList, basestring ):
propList = [ propList ]
return __matchProps( propList, getPropertiesForHost( hostName ) )
def getUserOption( userName, optName, defaultValue = "" ):
return gConfig.getValue( "%s/Users/%s/%s" % ( gBaseRegistrySection, userName, optName ), defaultValue )
def getGroupOption( groupName, optName, defaultValue = "" ):
return gConfig.getValue( "%s/Groups/%s/%s" % ( gBaseRegistrySection, groupName, optName ), defaultValue )
def getHostOption( hostName, optName, defaultValue = "" ):
return gConfig.getValue( "%s/Hosts/%s/%s" % ( gBaseRegistrySection, hostName, optName ), defaultValue )
def getHosts():
return gConfig.getSections( '%s/Hosts' % gBaseRegistrySection )
def getVOOption( voName, optName, defaultValue = "" ):
return gConfig.getValue( "%s/VO/%s/%s" % ( gBaseRegistrySection, voName, optName ), defaultValue )
def getBannedIPs():
return gConfig.getValue( "%s/BannedIPs" % gBaseRegistrySection, [] )
def getVOForGroup( group ):
voName = getVO()
if voName:
return voName
return gConfig.getValue( "%s/Groups/%s/VO" % ( gBaseRegistrySection, group ), "" )
def getDefaultVOMSAttribute():
return gConfig.getValue( "%s/DefaultVOMSAttribute" % gBaseRegistrySection, "" )
def getVOMSAttributeForGroup( group ):
return gConfig.getValue( "%s/Groups/%s/VOMSRole" % ( gBaseRegistrySection, group ), getDefaultVOMSAttribute() )
def getDefaultVOMSVO():
vomsVO = gConfig.getValue( "%s/DefaultVOMSVO" % gBaseRegistrySection, "" )
if vomsVO:
return vomsVO
return getVO()
def getVOMSVOForGroup( group ):
vomsVO = gConfig.getValue( "%s/Groups/%s/VOMSVO" % ( gBaseRegistrySection, group ), getDefaultVOMSVO() )
if not vomsVO:
vo = getVOForGroup( group )
vomsVO = getVOOption( vo, 'VOMSName', '' )
return vomsVO
def getGroupsWithVOMSAttribute( vomsAttr ):
retVal = gConfig.getSections( "%s/Groups" % ( gBaseRegistrySection ) )
if not retVal[ 'OK' ]:
return []
groups = []
for group in retVal[ 'Value' ]:
if vomsAttr == gConfig.getValue( "%s/Groups/%s/VOMSRole" % ( gBaseRegistrySection, group ), "" ):
groups.append( group )
return groups
def getVOs():
""" Get all the configured VOs
"""
voName = getVO()
if voName:
return S_OK([ voName] )
return gConfig.getSections( '%s/VO' % gBaseRegistrySection )
def getVOMSServerInfo( requestedVO = '' ):
""" Get information on VOMS servers for the given VO or for all of them
"""
vomsDict = {}
# For backward compatibility check the VOMS section first
result = gConfig.getSections( '%s/VOMS/Servers' % gBaseRegistrySection )
if result['OK']:
voNames = result['Value']
for vo in voNames:
if requestedVO and vo != requestedVO:
continue
vomsDict.setdefault( vo, {} )
vomsDict[vo]['VOMSName'] = vo
result = gConfig.getSections( '%s/VOMS/Servers/%s' % (gBaseRegistrySection, vo) )
if result['OK']:
serverList = result['Value']
vomsDict[vo].setdefault( "Servers", {} )
for server in serverList:
DN = gConfig.getValue( '%s/VOMS/Servers/%s/%s/DN' % (gBaseRegistrySection, vo, server), '' )
CA = gConfig.getValue( '%s/VOMS/Servers/%s/%s/CA' % (gBaseRegistrySection, vo, server), '' )
port = gConfig.getValue( '%s/VOMS/Servers/%s/%s/Port' % (gBaseRegistrySection, vo, server), 0 )
vomsDict[vo]['Servers'].setdefault( server, {} )
vomsDict[vo]['Servers'][server]['DN'] = DN
vomsDict[vo]['Servers'][server]['CA'] = CA
vomsDict[vo]['Servers'][server]['Port'] = port
result = getVOs()
if result['OK']:
voNames = result['Value']
for vo in voNames:
if requestedVO and vo != requestedVO:
continue
vomsName = getVOOption( vo, 'VOMSName', '' )
if not vomsName:
continue
vomsDict.setdefault( vo, {} )
vomsDict[vo]['VOMSName'] = getVOOption( vo, 'VOMSName', '' )
result = gConfig.getSections( '%s/VO/%s/VOMSServers' % (gBaseRegistrySection, vo) )
if result['OK']:
serverList = result['Value']
vomsDict[vo].setdefault( "Servers", {} )
for server in serverList:
vomsDict[vo]['Servers'].setdefault( server, {} )
DN = gConfig.getValue( '%s/VO/%s/VOMSServers/%s/DN' % (gBaseRegistrySection, vo, server), '' )
CA = gConfig.getValue( '%s/VO/%s/VOMSServers/%s/CA' % (gBaseRegistrySection, vo, server), '' )
port = gConfig.getValue( '%s/VO/%s/VOMSServers/%s/Port' % (gBaseRegistrySection, vo, server), 0 )
vomsDict[vo]['Servers'][server]['DN'] = DN
vomsDict[vo]['Servers'][server]['CA'] = CA
vomsDict[vo]['Servers'][server]['Port'] = port
return S_OK( vomsDict )
def getVOMSRoleGroupMapping( vo = '' ):
""" Get mapping of the VOMS role to the DIRAC group
:param str vo: perform the operation for the given VO
:return: standard structure with two mappings: VOMS-DIRAC { <VOMS_Role>: [<DIRAC_Group>] }
and DIRAC-VOMS { <DIRAC_Group>: <VOMS_Role> } and a list of DIRAC groups without mapping
"""
result = getGroupsForVO( vo )
if not result['OK']:
return result
groupList = result['Value']
vomsGroupDict = {}
groupVomsDict = {}
noVOMSGroupList = []
for group in groupList:
vomsRole = getGroupOption( group, 'VOMSRole' )
if vomsRole:
vomsGroupDict.setdefault( vomsRole, [] )
vomsGroupDict[vomsRole].append( group )
groupVomsDict[group] = vomsRole
for group in groupList:
    if group not in groupVomsDict:
noVOMSGroupList.append(group)
return S_OK( { "VOMSDIRAC": vomsGroupDict, "DIRACVOMS": groupVomsDict, "NoVOMS": noVOMSGroupList } )
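# Example sketch: resolve which DIRAC groups map to a VOMS role. The VO name
# and role below are placeholders.
#
#   result = getVOMSRoleGroupMapping( 'biomed' )
#   if result['OK']:
#     prodGroups = result['Value']['VOMSDIRAC'].get( '/biomed/Role=production', [] )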
| gpl-3.0 | 7,613,487,152,467,321,000 | 33.35061 | 113 | 0.665838 | false |
frank2/paranoia | lib/base/event.py | 1 | 2058 | #!/usr/bin/env python
import inspect
from paranoia.base.paranoia_agent import ParanoiaAgent, ParanoiaError
__all__ = ['get_event_base', 'EventError', 'Event', 'InstantiateEvent'
,'SetPropertyEvent', 'NewAddressEvent', 'NewShiftEvent', 'NewSizeEvent'
,'SetValueEvent', 'DeclareSubregionEvent', 'MoveSubregionEvent'
,'RemoveSubregionEvent']
class EventError(ParanoiaError):
pass
def get_event_base(event_class):
if isinstance(event_class, Event):
event_class = event_class.__class__
if not inspect.isclass(event_class):
raise EventError('event class must be a class')
if not issubclass(event_class, Event):
raise EventError('class must derive Event')
if event_class == Event:
raise EventError('cannot get base of root class')
base_class = event_class
while not Event in base_class.__bases__:
base_class = base_class.__bases__[0]
return base_class
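# Example sketch (MyNewSizeEvent is hypothetical): given NewSizeEvent(Event)
# below and a subclass MyNewSizeEvent(NewSizeEvent), get_event_base walks
# __bases__ until Event is a direct base and returns NewSizeEvent.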
class Event(ParanoiaAgent):
def __call__(self, *args):
raise NotImplementedError
class InstantiateEvent(Event):
def __call__(self, decl, instance, kwargs):
raise NotImplementedError
class SetPropertyEvent(Event):
def __call__(self, decl, prop, value):
raise NotImplementedError
class NewAddressEvent(Event):
def __call__(self, decl, address, shift):
raise NotImplementedError
class NewShiftEvent(Event):
def __call__(self, decl, shift):
raise NotImplementedError
class NewSizeEvent(Event):
def __call__(self, decl, old_size, new_size):
raise NotImplementedError
class SetValueEvent(Event):
def __call__(self, decl, value):
raise NotImplementedError
class DeclareSubregionEvent(Event):
def __call__(self, decl, subregion):
raise NotImplementedError
class MoveSubregionEvent(Event):
def __call__(self, decl, old_offset, new_offset):
raise NotImplementedError
class RemoveSubregionEvent(Event):
def __call__(self, decl, subregion):
raise NotImplementedError
| gpl-3.0 | -8,133,959,076,121,812,000 | 27.191781 | 82 | 0.674441 | false |
peterbrittain/asciimatics | samples/treeview.py | 1 | 2992 | #!/usr/bin/env python3
from asciimatics.event import KeyboardEvent
from asciimatics.widgets import Frame, Layout, FileBrowser, Widget, Label, PopUpDialog, Text, \
Divider
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, StopApplication
import sys
import os
try:
import magic
except ImportError:
pass
class DemoFrame(Frame):
def __init__(self, screen):
super(DemoFrame, self).__init__(
screen, screen.height, screen.width, has_border=False, name="My Form")
# Create the (very simple) form layout...
layout = Layout([1], fill_frame=True)
self.add_layout(layout)
# Now populate it with the widgets we want to use.
self._details = Text()
self._details.disabled = True
self._details.custom_colour = "field"
self._list = FileBrowser(Widget.FILL_FRAME,
os.path.abspath("."),
name="mc_list",
on_select=self.popup,
on_change=self.details)
layout.add_widget(Label("Local disk browser sample"))
layout.add_widget(Divider())
layout.add_widget(self._list)
layout.add_widget(Divider())
layout.add_widget(self._details)
layout.add_widget(Label("Press Enter to select or `q` to quit."))
# Prepare the Frame for use.
self.fix()
def popup(self):
# Just confirm whenever the user actually selects something.
self._scene.add_effect(
PopUpDialog(self._screen, "You selected: {}".format(self._list.value), ["OK"]))
def details(self):
# If python magic is installed, provide a little more detail of the current file.
if self._list.value:
if os.path.isdir(self._list.value):
self._details.value = "Directory"
elif os.path.isfile(self._list.value):
try:
self._details.value = magic.from_file(self._list.value)
except NameError:
self._details.value = "File (run 'pip install python-magic' for more details)"
else:
self._details.value = "--"
def process_event(self, event):
# Do the key handling for this Frame.
if isinstance(event, KeyboardEvent):
if event.key_code in [ord('q'), ord('Q'), Screen.ctrl("c")]:
raise StopApplication("User quit")
# Now pass on to lower levels for normal handling of the event.
return super(DemoFrame, self).process_event(event)
def demo(screen, old_scene):
screen.play([Scene([DemoFrame(screen)], -1)], stop_on_resize=True, start_scene=old_scene)
last_scene = None
while True:
try:
Screen.wrapper(demo, catch_interrupt=False, arguments=[last_scene])
sys.exit(0)
except ResizeScreenError as e:
last_scene = e.scene
| apache-2.0 | 4,689,903,952,881,384,000 | 35.048193 | 98 | 0.602273 | false |
simbtrix/screenmix | screenmix/ackModel/ack.py | 1 | 1349 | '''
Created on 01.08.2016
@author: mkennert
'''
from kivy.properties import ObjectProperty
from kivy.uix.gridlayout import GridLayout
from ackModel.ackRect import AckRect
class Ack(GridLayout):
'''
    ack contains all acks from the different shapes. It manages which ack
    should be shown in the ack-menu, which is appended to the cross-section shape
'''
# all acks of the application
ackRect = ObjectProperty()
#####################################################
# here you can add more ack's. When you add one more #
# make sure, that the ack has a show method like the #
# show_ack_rect #
#####################################################
# constructor
def __init__(self, **kwargs):
super(Ack, self).__init__(**kwargs)
self.cols = 1
# default ack is the ack of the rectangle shape
self.ackRect = AckRect()
self.content = self.ackRect
self.add_widget(self.content)
'''
show the ack of the shape rectangle
'''
def show_ack_rect(self):
# remove the old content
self.remove_widget(self.content)
self.add_widget(self.ackRect)
# safe the new ack as content
self.content = self.ackRect
| gpl-3.0 | -1,378,046,519,291,623,700 | 27.326087 | 79 | 0.535211 | false |
Matoking/pastebin-django | home/tests.py | 1 | 7947 | from pastebin.testcase import CacheAwareTestCase
from freezegun import freeze_time
from django.core.urlresolvers import reverse
@freeze_time("2015-01-01")
class LatestPastesTests(CacheAwareTestCase):
def test_latest_pastes_empty(self):
"""
Test that latest pastes shows the "no pastes uploaded" message when no pastes
have been uploaded
"""
response = self.client.get(reverse("latest_pastes"))
self.assertContains(response, "No pastes uploaded")
def test_latest_pastes_with_pastes(self):
"""
Upload two pastes and check that they're visible on the list
"""
self.client.post(reverse("home:home"), { "title": "Paste",
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "never",
"visibility": "public"},
follow=True)
self.client.post(reverse("home:home"), { "title": "Paste 2",
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "never",
"visibility": "public"},
follow=True)
response = self.client.get(reverse("latest_pastes"))
self.assertContains(response, "Paste")
self.assertContains(response, "Paste 2")
def test_latest_pastes_shows_correct_pastes(self):
"""
Upload hidden and expiring paste and make sure hidden and expiring pastes
aren't shown when they shouldn't be shown
"""
with freeze_time("2015-01-01 12:00:00"):
for i in range(0, 5):
self.client.post(reverse("home:home"), {"title": "Normal paste %d" % i,
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "never",
"visibility": "public"},
follow=True)
for i in range(0, 5):
self.client.post(reverse("home:home"), {"title": "Expiring paste %d" % i,
"text": "This is a test",
"syntax_highlighting": "text",
"expiration": "1h",
"visibility": "public"},
follow=True)
self.client.post(reverse("home:home"), {"title": "Hidden paste",
"text": "This is a test",
"syntax_highlighting": "text",
"expiration": "1h",
"visibility": "hidden"},
follow=True)
response = self.client.get(reverse("latest_pastes"))
self.assertContains(response, "Normal paste", count=5)
self.assertContains(response, "Expiring paste", count=5)
self.assertNotContains(response, "Hidden paste")
with freeze_time("2015-01-01 13:00:01"):
self.clearCache()
response = self.client.get(reverse("latest_pastes"))
self.assertContains(response, "Normal paste", count=5)
self.assertNotContains(response, "Expiring paste")
self.assertNotContains(response, "Hidden paste")
def test_latest_pastes_redirects_to_last_page(self):
"""
Try checking a page of latest pastes which doesn't exist
User should be redirected to the last page
"""
self.client.post(reverse("home:home"), {"title": "Test paste",
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "never",
"visibility": "public"},
follow=True)
response = self.client.get(reverse("latest_pastes", kwargs={"page": 2}))
self.assertContains(response, "Test paste")
self.assertContains(response, "1</span>")
self.assertNotContains(response, "2</span>")
def test_latest_pastes_doesnt_show_hidden_pastes(self):
"""
Upload a hidden paste and check that it isn't visible in the latest pastes
"""
self.client.post(reverse("home:home"), {"title": "Paste paste",
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "never",
"visibility": "hidden"},
follow=True)
response = self.client.get(reverse("latest_pastes"))
self.assertContains(response, "No pastes uploaded")
def test_latest_pastes_doesnt_show_expired_pastes(self):
"""
Upload an expiring paste and check that it isn't visible after it has expired
"""
with freeze_time("2015-01-01 12:00:00"):
self.client.post(reverse("home:home"), {"title": "Paste paste",
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "1h",
"visibility": "public"},
follow=True)
self.clearCache()
response = self.client.get(reverse("home:home"))
self.assertContains(response, "Paste paste")
with freeze_time("2015-01-01 13:00:01"):
self.clearCache()
response = self.client.get(reverse("home:home"))
self.assertContains(response, "No pastes have been submitted yet")
def test_random_with_no_pastes_redirects_to_home(self):
"""
Try going to a random paste when no pastes have been uploaded
User should be redirect to home.
"""
response = self.client.post(reverse("random_paste"), follow=True)
self.assertContains(response, "Upload a new paste")
def test_random_with_paste(self):
"""
Upload one paste and go to a random paste
"""
self.client.post(reverse("home:home"), { "title": "Test paste",
"text": "This is a test.",
"syntax_highlighting": "text",
"expiration": "never",
"visibility": "public"},
follow=True)
response = self.client.post(reverse("random_paste"), follow=True)
self.assertContains(response, "Test paste") | unlicense | 1,292,723,512,337,146,600 | 47.760736 | 89 | 0.423682 | false |
davecroll/data-tasks | datatasks/sources/DatabaseSource.py | 1 | 1161 | # datatasks\sources\DatabaseSource.py
import os
from collections import OrderedDict
from datatasks.db.DatabaseFactory import DatabaseFactory
from datatasks.utils import convert_to_primary_datatypes
from .DataSource import DataSource
class DatabaseSource(DataSource):
"""
Inherits RecordList and is capable of reading records in from a database.
    Capable of being managed by DataEntityManager. The filepath parameter
    is the path to a text file containing a SQL query.
"""
def __init__(self, name, db_name, filepath, **kwargs):
self._db = DatabaseFactory().get_db(db_name)
super().__init__(name, **kwargs)
self.filepath = filepath
    def read_in(self):
        """Reads records in from the database using the provided SQL query."""
if self.loaded:
return
try:
with open(self.filepath, 'r') as sql_file:
sql_query_str = sql_file.read()
except:
raise FileNotFoundError('cannot read from query file')
self.loaded = True
for record in self._db.get_query_records(sql_query_str):
self.load(record)
| mit | 1,022,893,840,000,731,100 | 32.171429 | 78 | 0.660637 | false |
sfu-natlang/HMM-Aligner | src/models/Old/IBM1WithAlignmentType.py | 1 | 5456 | # -*- coding: utf-8 -*-
#
# IBM model 1 with alignment type implementation of HMM Aligner
# Simon Fraser University
# NLP Lab
#
# This is the implementation of IBM model 1 word aligner with alignment type.
#
from collections import defaultdict
from loggers import logging
from models.IBM1Base import AlignmentModelBase as IBM1Base
from evaluators.evaluator import evaluate
__version__ = "0.4a"
class AlignmentModel(IBM1Base):
def __init__(self):
self.modelName = "IBM1WithPOSTagAndAlignmentType"
self.version = "0.2b"
self.logger = logging.getLogger('IBM1')
self.evaluate = evaluate
self.fe = ()
self.s = defaultdict(list)
self.sTag = defaultdict(list)
self.index = 0
self.typeList = []
self.typeIndex = {}
self.typeDist = []
self.lambd = 1 - 1e-20
self.lambda1 = 0.9999999999
self.lambda2 = 9.999900827395436E-11
self.lambda3 = 1.000000082740371E-15
self.loadTypeDist = {"SEM": .401, "FUN": .264, "PDE": .004,
"CDE": .004, "MDE": .012, "GIS": .205,
"GIF": .031, "COI": .008, "TIN": .003,
"NTR": .086, "MTA": .002}
self.modelComponents = ["t", "s", "sTag",
"typeList", "typeIndex", "typeDist",
"lambd", "lambda1", "lambda2", "lambda3"]
IBM1Base.__init__(self)
return
def _beginningOfIteration(self):
self.c = defaultdict(float)
self.total = defaultdict(float)
self.c_feh = defaultdict(
lambda: [0.0 for h in range(len(self.typeList))])
return
def _updateCount(self, fWord, eWord, z, index):
tPr_z = self.tProbability(fWord, eWord) / z
self.c[(fWord[self.index], eWord[self.index])] += tPr_z
self.total[eWord[self.index]] += tPr_z
c_feh = self.c_feh[(fWord[self.index], eWord[self.index])]
for h in range(len(self.typeIndex)):
c_feh[h] += tPr_z * self.sProbability(fWord, eWord, h)
return
def _updateEndOfIteration(self):
for (f, e) in self.c:
self.t[(f, e)] = self.c[(f, e)] / self.total[e]
s = self.s if self.index == 0 else self.sTag
for f, e in self.c_feh:
c_feh = self.c_feh[(f, e)]
s_tmp = s[(f, e)]
for h in range(len(self.typeIndex)):
s_tmp[h] = c_feh[h] / self.c[(f, e)]
return
def sProbability(self, f, e, h):
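        # Interpolation sketch: p1 smooths the word-pair table s with the
        # typeDist prior, p2 does the same with the POS-tag table sTag, and
        # p3 is the prior alone; lambda1-3 weight the three estimates.
        # When self.index != 0 (stage 1) only the tag-based mixture is used.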
fWord, fTag = f
eWord, eTag = e
if self.fe != (f, e):
self.fe, sKey, sTagKey = (f, e), (f[0], e[0]), (f[1], e[1])
self.sTmp = self.s[sKey] if sKey in self.s else None
self.sTagTmp = self.sTag[sTagKey] if sTagKey in self.sTag else None
sTmp = self.sTmp[h] if self.sTmp else 0
sTagTmp = self.sTagTmp[h] if self.sTagTmp else 0
if self.index == 0:
p1 = (1 - self.lambd) * self.typeDist[h] + self.lambd * sTmp
p2 = (1 - self.lambd) * self.typeDist[h] + self.lambd * sTagTmp
p3 = self.typeDist[h]
return self.lambda1 * p1 + self.lambda2 * p2 + self.lambda3 * p3
else:
return (1 - self.lambd) * self.typeDist[h] + self.lambd * sTagTmp
def tProbability(self, f, e):
return IBM1Base.tProbability(self, f, e, self.index)
def decodeSentence(self, sentence):
f, e, align = sentence
sentenceAlignment = []
for i in range(len(f)):
max_ts = 0
argmax = -1
bestType = -1
for j in range(len(e)):
                t = self.tProbability(f[i], e[j])
for h in range(len(self.typeIndex)):
s = self.sProbability(f[i], e[j], h)
if t * s > max_ts:
max_ts = t * s
argmax = j
bestType = h
sentenceAlignment.append(
(i + 1, argmax + 1, self.typeList[bestType]))
return sentenceAlignment
def trainStage1(self, dataset, iterations=5):
self.logger.info("Stage 1 Start Training with POS Tags")
self.logger.info("Initialising model with POS Tags")
# self.index set to 1 means training with POS Tag
self.index = 1
self.initialiseBiwordCount(dataset, self.index)
self.sTag = self.calculateS(dataset, self.fe_count, self.index)
self.logger.info("Initialisation complete")
self.EM(dataset, iterations, 'IBM1TypeS1')
# reset self.index to 0
self.index = 0
self.logger.info("Stage 1 Complete")
return
def trainStage2(self, dataset, iterations=5):
self.logger.info("Stage 2 Start Training with FORM")
self.logger.info("Initialising model with FORM")
self.initialiseBiwordCount(dataset, self.index)
self.s = self.calculateS(dataset, self.fe_count, self.index)
self.logger.info("Initialisation complete")
self.EM(dataset, iterations, 'IBM1TypeS2')
self.logger.info("Stage 2 Complete")
return
def train(self, dataset, iterations=5):
self.logger.info("Initialising Alignment Type Distribution")
self.initialiseAlignTypeDist(dataset, self.loadTypeDist)
self.trainStage1(dataset, iterations)
self.trainStage2(dataset, iterations)
return
| mit | -7,192,015,633,586,471,000 | 37.422535 | 79 | 0.559751 | false |
SMMAR11/smmarbsence | app/forms/admin.py | 1 | 4838 | # coding: utf-8
# Imports
from app.models import *
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
class FGroupeUtilisateur(forms.ModelForm) :
    # Field
util = forms.ModelMultipleChoiceField(
label = 'Utilisateurs composant le groupe',
queryset = TUtilisateur.objects.order_by('username'),
required = False,
widget = FilteredSelectMultiple('T_UTILISATEUR', is_stacked = False)
)
class Meta :
fields = '__all__'
model = TGroupeUtilisateur
def __init__(self, *args, **kwargs) :
super(FGroupeUtilisateur, self).__init__(*args, **kwargs)
        # Set the initial value of the custom field
if self.instance.get_pk() :
self.fields['util'].initial = [u.get_pk() for u in self.instance.get_util_set().all()]
def save(self, *args, **kwargs) :
        # Create or update a TGroupeUtilisateur instance
obj = super(FGroupeUtilisateur, self).save(*args, **kwargs)
obj.save()
        # Link with the t_groupes_utilisateur table
obj.get_gpe_util_set().all().delete()
for u in self.cleaned_data.get('util') : TGroupesUtilisateur.objects.create(id_gpe_util = obj, id_util = u)
return obj
class FUtilisateurCreate(forms.ModelForm) :
    # Fields
zs_password = forms.CharField(label = 'Mot de passe', widget = forms.PasswordInput())
zs_password_bis = forms.CharField(label = 'Confirmation du mot de passe', widget = forms.PasswordInput())
class Meta :
fields = [
'email',
'first_name',
'is_active',
'is_staff',
'is_superuser',
'last_name',
'username'
]
labels = { 'email' : 'Courriel principal', 'last_name' : 'Nom de famille' }
model = TUtilisateur
def __init__(self, *args, **kwargs) :
        # Initialise the arguments
self.kw_test = kwargs.pop('kw_test', False)
super(FUtilisateurCreate, self).__init__(*args, **kwargs)
        # Mark certain fields as required
self.fields['email'].required = True
self.fields['first_name'].required = True
self.fields['last_name'].required = True
def clean_zs_password_bis(self) :
        # Store the form data
val_password = self.cleaned_data.get('zs_password')
val_password_bis = self.cleaned_data.get('zs_password_bis')
        # Raise an error if the two passwords do not match
if val_password and val_password_bis and val_password != val_password_bis :
raise forms.ValidationError('Les mots de passe saisis ne correspondent pas.')
def save(self, *args, **kwargs) :
        # Create a TUtilisateur instance
obj = super(FUtilisateurCreate, self).save(*args, **kwargs)
obj.set_password(self.cleaned_data.get('zs_password'))
obj.save()
        # Mandatory link with the t_roles_utilisateur table
if 'A' not in obj.get_type_util__list() :
TRolesUtilisateur.objects.create(code_type_util = TTypeUtilisateur.objects.get(pk = 'A'), id_util = obj)
return obj
class FUtilisateurUpdate(forms.ModelForm) :
# Import
from django.contrib.auth.forms import ReadOnlyPasswordHashField
    # Field
password = ReadOnlyPasswordHashField(
help_text = '''
Les mots de passe ne sont pas enregistrés en clair, ce qui ne permet pas d'afficher le mot de passe de cet
utilisateur, mais il est possible de le changer en utilisant <a href="../password/">ce formulaire</a>.
''',
label = 'Mot de passe'
)
class Meta :
fields = [
'email',
'first_name',
'is_active',
'is_staff',
'is_superuser',
'last_name',
'username'
]
labels = { 'email' : 'Courriel principal', 'last_name' : 'Nom de famille' }
model = TUtilisateur
def __init__(self, *args, **kwargs) :
super(FUtilisateurUpdate, self).__init__(*args, **kwargs)
        # Mark certain fields as required
self.fields['email'].required = True
self.fields['first_name'].required = True
self.fields['last_name'].required = True
def clean_password(self) : return self.initial['password']
def save(self, *args, **kwargs) :
        # Update a TUtilisateur instance
obj = super(FUtilisateurUpdate, self).save(*args, **kwargs).save()
        # Mandatory link with the t_roles_utilisateur table
if 'A' not in obj.get_type_util__list() :
TRolesUtilisateur.objects.create(code_type_util = TTypeUtilisateur.objects.get(pk = 'A'), id_util = obj)
return obj | gpl-3.0 | 1,337,630,901,324,975,600 | 33.248227 | 116 | 0.612676 | false |
Gram21/ctfcode | pwnit.py | 1 | 4547 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Python exploit template.
#
# Author: Jan Keim aka Gram21, Gramarye
from pwn import *
####################################
# Target System #
####################################
# Server Connection
target = "localhost"
port = 1337
# Process Connection
#binary = "./binary"
# Context: i386/amd64/... and linux/freebsd/windows
context.update(arch='i386', os='linux')
####################################
# Settings #
####################################
# Set the context level to debug:
DEBUG = False
# Set if recv should automatically print
PRINTER = True
# Std timeout for the connection
TIMEOUT = 2
# Std print color. None means no extra color
STD_COL = None
####################################
# Colors #
####################################
class col:
BLACK = '30'
RED = '31'
GREEN = '32'
BROWN = '33'
YELLOW = '33'
BLUE = '34'
MAGENTA = '35'
CYAN = '36'
WHITE = '37'
CLEAR = '0'
UNDERLINE = '4'
BOLD = '1'
ESCAPE_START = '\033['
ESCAPE_END = 'm'
####################################
# print methods #
####################################
"""method to print a string more pretty"""
def prettyprint(s, color=STD_COL):
if color == None:
print s
else:
# TODO differentiate between printable and "hex"?
coloring = col.ESCAPE_START + color + col.ESCAPE_END
clear = col.ESCAPE_START + col.CLEAR + col.ESCAPE_END
print coloring + s + clear
def print_good(s):
prettyprint(s, color=col.GREEN)
def print_bad(s):
prettyprint(s, color=col.RED)
def print_info(s):
prettyprint(s, color=col.YELLOW)
def print_bold(s):
prettyprint(s, color=col.BOLD)
def print_underline(s):
prettyprint(s, color=col.UNDERLINE)
####################################
# convenience wrappers #
####################################
def send(s=""):
r.send(s)
"""send with a newline at the end"""
def sendline(s=""):
r.sendline(s)
"""recvuntil then send"""
def sendafter(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendafter(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
"""recvuntil then sendline"""
def sendlineafter(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendlineafter(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
"""sendline and then recvuntil"""
def sendlinethen(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendlinethen(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
"""send and then recvuntil"""
def sendthen(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendthen(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
def recv(shallprint=PRINTER, color=STD_COL):
tmp = r.recv()
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv until a newline is found"""
def recvline(shallprint=PRINTER, color=STD_COL):
tmp = r.recvline()
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv until s appeared. drop s if drop=true"""
def recvuntil(s, shallprint=PRINTER, drop=False, color=STD_COL):
tmp = r.recvuntil(s,drop)
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv n bytes"""
def recvn(n, shallprint=PRINTER, color=STD_COL):
tmp = r.recvn(n)
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv until regex is found"""
def recvregex(regex, shallprint=PRINTER, exact=False, color=STD_COL):
tmp = r.recvregex(regex, exact)
if shallprint:
prettyprint(tmp, color)
return tmp
####################################
# PWN #
####################################
if DEBUG:
context.log_level = 'debug'
# Connect to target
r = remote(target, port, timeout=TIMEOUT)
# Connect to process
#r = process(binary)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Your code here
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Things that can be used:
# sleep(1)
# pause()/pause(n) -> waits for user input or n seconds
# cyclic(100), cyclic_find("aaaa")
# p32(0xdeadbeef), u32(s), p32(0xdeadbeef, endian='big') etc.
# asm(shellcraft.sh()) or similar
def pwn():
pass
# start the pwn
if __name__ == "__main__":
pause() # requires user input to start (e.g. waiting for server etc)
pwn()
# When there is a shell
# r.interactive()
| mit | -3,380,402,577,287,018,500 | 23.058201 | 72 | 0.553992 | false |
frankyrumple/ope | laptop_credential/winsys/tests/test_fs/test_filepath.py | 2 | 6889 | import os
import filecmp
import shutil
import tempfile
import unittest as unittest0
try:
unittest0.skipUnless
unittest0.skip
except AttributeError:
import unittest2 as unittest
else:
unittest = unittest0
del unittest0
import uuid
import win32file
from winsys import fs
from winsys.tests import utils
def _test_parts (path, result, skip_rejoin=False):
parts = fs.get_parts (path)
assert parts == result
assert parts == fs.get_parts (path.replace ("\\", "/"))
if not skip_rejoin:
assert parts[0] + fs.sep.join (parts[1:]) == path
class TestFilepath (unittest.TestCase):
#
# get_parts: UNC
#
def test_unc_bare (self):
_test_parts (r"\\server\share", ["\\\\server\\share\\", ""], skip_rejoin=True)
def test_unc_root (self):
_test_parts ("\\\\server\\share\\", ["\\\\server\\share\\", ""])
def test_unc_directory (self):
_test_parts ("\\\\server\\share\\test\\", ["\\\\server\\share\\", "test"], skip_rejoin=True)
def test_unc_non_directory (self):
_test_parts (r"\\server\share\test", ["\\\\server\\share\\", "test"])
#
# get_parts: Volume-Drive
#
def test_volume_bare (self):
#
# No special-casing here...
#
_test_parts (r"\\?\C:", ["\\\\?\\C:\\", ""], skip_rejoin=True)
def test_volume_bare_leaf (self):
#
# This one's a bit awkward; maybe we should raise an
# exception at an invalid filename, but we're not
# validating anywhere else, so just best-guess it.
#
_test_parts (r"\\?\C:abc.txt", ["\\\\?\\C:\\", "abc.txt"], skip_rejoin=True)
def test_volume_root (self):
_test_parts ("\\\\?\\C:\\", ["\\\\?\\C:\\", ""])
def test_volume_directory (self):
_test_parts ("\\\\?\\C:\\test\\", ["\\\\?\\C:\\", "test"], skip_rejoin=True)
def test_volume_non_directory (self):
_test_parts ("\\\\?\\C:\\test\\abc.txt", ["\\\\?\\C:\\", "test", "abc.txt"])
#
# get_parts: Drive
#
def test_drive_bare (self):
_test_parts ("C:", ["C:", ""])
def test_drive_bare_leaf (self):
_test_parts (r"C:abc.txt", ["C:", "abc.txt"])
def test_drive_root (self):
_test_parts ("C:\\", ["C:\\", ""])
def test_drive_directory (self):
_test_parts ("C:\\test\\", ["C:\\", "test"], skip_rejoin=True)
def test_drive_non_directory (self):
_test_parts ("C:\\test\\abc.txt", ["C:\\", "test", "abc.txt"])
#
# filepath
#
test_cases = [line.split () for line in """
path root filename name dirname path parent base ext
\\\\a\\b\\c\\d.txt \\\\a\\b\\ d.txt d.txt c \\\\a\\b\\c \\\\a\\b\\c d .txt
c:\\boot.ini c:\\ boot.ini boot.ini _ c:\\ c:\\ boot .ini
boot.ini _ boot.ini boot.ini _ _ x_fs boot .ini
c:\\t c:\\ t t _ c:\\ c:\\ t _
c:\\t\\ c:\\ t t _ c:\\ c:\\ t _
c:\\t\\a.txt c:\\ a.txt a.txt t c:\\t c:\\t a .txt
c:a.txt c: a.txt a.txt _ c: x_fs a .txt
a.txt _ a.txt a.txt _ _ x_fs a .txt
""".splitlines () if line.strip ()]
def test_filepath (self):
test_order = self.test_cases[0][1:]
for test_case in self.test_cases[1:]:
path, rest = test_case[0], test_case[1:]
fp = fs.filepath (path)
for n, test_item in enumerate (test_order):
result = rest[n]
if result.startswith ("x_"):
exc = getattr (fs, result)
try:
getattr (fp, test_item)
except exc:
pass
else:
raise RuntimeError (
"Path: %s; Part: %s; expected exception %s" % (path, test_item, result)
)
else:
if result == "_":
result = ""
self.assertEquals (
result,
getattr (fp, test_item),
"Path: %s; Part %s; expected: %s; result: %s" % (path, test_item, result, getattr (fp, test_item))
)
#
# Concatenation
#
left = [r"c:\\", r"c:\temp", r"c:\temp\abc.txt", "temp"]
right = ["abc", r"c:\temp", r"\abc", "abc"]
def test_add (self):
for l in self.left:
for r in self.right:
self.assertEquals (fs.filepath (l) + r, os.path.join (l, r))
def test_radd (self):
for l in self.left:
for r in self.right:
self.assertEquals (l + fs.filepath (r), os.path.join (l, r))
def test_is_relative (self):
for path, result in [
("c:/", False),
("c:/temp/abc.txt", False),
("temp", True),
("c:temp", True),
(r"\\server\share", False),
(r"\\server\share\d1", False)
]:
self.assertEquals (fs.filepath (path).is_relative (), result)
def test_absolute (self):
for path in ["c:/temp", "temp", "c:temp", r"\\server\share\d1"]:
self.assertEquals (fs.filepath (path).absolute ().lower (), os.path.abspath (path).lower ())
#
# The .changed method returns a version of the filepath
# with one or more of its components changed. Certain
# combinations are pointless and raise an exception.
#
def test_changed_filename_and_base (self):
with self.assertRaises (fs.x_fs):
fs.filepath (".").changed (filename="test.txt", base="test")
def test_changed_filename_and_ext (self):
with self.assertRaises (fs.x_fs):
fs.filepath (".").changed (filename="test.txt", ext=".txt")
def test_changed_filename_and_infix (self):
with self.assertRaises (fs.x_fs):
fs.filepath (".").changed (filename="test.txt", infix="-test-")
def test_changed_root (self):
self.assertEquals (fs.filepath ("c:\\temp\\abc.txt").changed (root="d:\\"), "d:\\temp\\abc.txt")
def test_changed_dirname (self):
self.assertEquals (fs.filepath ("c:\\temp\\abc.txt").changed (dirname="temp2"), "c:\\temp2\\abc.txt")
def test_changed_filename (self):
self.assertEquals (fs.filepath ("c:\\temp\\abc.txt").changed (filename="def.ghi"), "c:\\temp\\def.ghi")
def test_changed_base (self):
self.assertEquals (fs.filepath ("c:\\temp\\abc.txt").changed (base="def"), "c:\\temp\\def.txt")
def test_changed_infix (self):
self.assertEquals (fs.filepath ("c:\\temp\\abc.txt").changed (infix=".infix"), "c:\\temp\\abc.infix.txt")
def test_changed_ext (self):
self.assertEquals (fs.filepath ("c:\\temp\\abc.txt").changed (ext=".ext"), "c:\\temp\\abc.ext")
#
# dumps
#
def test_dump_absolute (self):
with utils.fake_stdout ():
fs.filepath (__file__).dump ()
def test_dump_relative (self):
with utils.fake_stdout ():
fs.filepath ("@@").dump ()
if __name__ == "__main__":
unittest.main ()
if sys.stdout.isatty (): raw_input ("Press enter...")
| mit | -304,891,808,880,446,500 | 32.280193 | 110 | 0.533749 | false |
informatik-mannheim/Moduro-CC3D | Simulation/Steppable/ModuroSteppable.py | 1 | 1244 | # Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Markus Gumbel"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "[email protected]"
__status__ = "Production"
from PySteppables import SteppableBasePy
class ModuroSteppable(SteppableBasePy):
def __init__(self, simulator, model, _frequency=1):
SteppableBasePy.__init__(self, simulator, _frequency)
self.model = model
self.execConfig = model.execConfig
def step(self, mcs):
if not self.execConfig.interuptMCS(mcs):
self.moduroStep(mcs) # better: not MCS but time!
# Abstract method:
def moduroStep(self, mcs):
return None
| apache-2.0 | -3,116,914,015,083,291,600 | 34.542857 | 76 | 0.694534 | false |
jbogers/amino-epg-grabber | src/AminoEPGGrabber.py | 1 | 27978 | #!/usr/bin/env python
"""
An XMLTV-compatible EPG grabber for the Amino EPG.
The grabber should function for any provider that supplies IPTV from Glashart Media.
"""
# Set program version
VERSION = "v0.5"
from datetime import datetime, date, timedelta
from lxml import etree
import pytz
import httplib
import socket
import StringIO
import gzip
import json
import cPickle
import os
import time
import inspect
import sys
#===============================================================================
# The internal data structure used in the AminoEPGGrabber to
# store the EPG data is as follows:
# (dict)
# epgData
# channelname:(dict)
# programid:(dict)
# starttime
# stoptime
# title
# sub-title
# desc
# actors []
# directors []
# categories []
#===============================================================================
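# A hypothetical instance of the structure above (channel id, program id and
# field values are made up for illustration; the code also adds a "grabbed"
# flag and a "credits" dict per program):
# epgData = {
#     "ned1": {
#         "12345678": {
#             "starttime": "20150101203000 +0100",
#             "stoptime": "20150101213000 +0100",
#             "title": "Example programme",
#             "desc": "...",
#             "categories": ["news"],
#         },
#     },
# }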
GRABBERDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
class AminoEPGGrabber(object):
"""
Class AminoEPGGrabber implements the grabbing and processing
functionality needed for generating an XMLTV guide from the
supplied location.
"""
def __init__(self):
# Set up defaults
self.epgServer = "w1.zt6.nl"
self.maxDays = 7
self.details = True
self.downloadlogo = False
self.logoStore = None
self.xmltvFile = "aminoepg.xml"
self.databaseFile = "aminograbber.pkl"
self.channelDict = {}
self._timezone = pytz.timezone("Europe/Amsterdam")
self._epgdata = dict()
self._xmltv = None
self._epgConnection = None
self._foundLogos = dict()
#===============================================================================
# Getters and setters
#===============================================================================
def set_timezone(self, timezoneName):
"""Set the timezone we are working in, by name"""
self._timezone = pytz.timezone(timezoneName)
def get_timezone(self):
"""Return the name of the currently set timezone"""
return self._timezone.zone
timezone = property(get_timezone, set_timezone)
#===============================================================================
# Public functions
#===============================================================================
def loadConfig(self, configFile):
"""Load the configuration from the given config file"""
try:
configTree = etree.parse(configFile)
config = configTree.getroot()
if config.tag != "AminoEpgConfig":
print >> sys.stderr, "The config.xml file does not appear to be a valid AminoEPGGrabber configuration document."
sys.exit(1)
# Try to read each config tag
server = config.find("server")
if server != None:
value = server.text.strip()
if value != "":
self.epgServer = value
maxdays = config.find("maxdays")
if maxdays != None:
try:
value = int(maxdays.text)
if value < 7: # Make sure only value < 7 are set (7 is default)
self.maxDays = value
except ValueError:
pass # Invalid value, ignore
grabdetails = config.find("grabdetails")
if grabdetails != None:
value = grabdetails.text.lower()
if value == "false": # True is default, so override to false only
self.details = False
downloadlogo = config.find("downloadlogo")
if downloadlogo != None:
value = downloadlogo.text.lower()
if value == "true": # False is default, so override to false only
self.downloadlogo = True
if downloadlogo.attrib.has_key("location"):
location = downloadlogo.attrib["location"].strip()
if location != "":
self.logoStore = location
xmltvfile = config.find("xmltvfile")
if xmltvfile != None:
value = xmltvfile.text.strip()
if value != "":
self.xmltvFile = value
databasefile = config.find("databasefile")
if databasefile != None:
value = databasefile.text.strip()
if value != "":
self.databaseFile = value
channellist = config.find("channellist")
if channellist != None:
# Channel list found, parse all entries
channelDict = {}
for channel in channellist.findall("channel"):
# Skip channels that are missing an 'id'
if not channel.attrib.has_key("id"):
continue
                    # Add channel to channelDict (overwriting existing entry)
channelDict[channel.attrib["id"].strip()] = channel.text.strip()
# Replace default channel dict with loaded dict
self.channelDict = channelDict
except etree.XMLSyntaxError as ex:
print >> sys.stderr, "Error parsing config.xml file: %s" % ex
sys.exit(1) # Quit with error code
except EnvironmentError as ex:
print >> sys.stderr, "Error opening config.xml file: %s" % ex
sys.exit(1) # Quit with error code
def loadDatabase(self):
"""
This function will load a database file into memory.
It will overwrite the current in-memory data
"""
# Only load if file exists
databaseFile = os.path.join(GRABBERDIR, self.databaseFile)
if os.path.isfile(databaseFile):
dbFile = open(databaseFile, "r")
self._epgdata = cPickle.load(dbFile)
dbFile.close()
# Remove channels that are not in the channel list
if len(self.channelDict) > 0:
for channel in self._epgdata.keys():
if not self.channelDict.has_key(channel):
del self._epgdata[channel]
# Determine current date
today = date.today()
# Remove programs that stopped before 'now'
for _, programs in self._epgdata.iteritems():
for programId in programs.keys():
stopDate = datetime.strptime(programs[programId]["stoptime"][:8], "%Y%m%d").date()
if stopDate < today:
# Remove program
del programs[programId]
else:
# Set program as not grabbed
programs[programId]["grabbed"] = False
def writeDatabase(self):
"""
This function will write the current in-memory EPG data to
a database file.
NOTE: Programs not found in the downloaded EPG will not be saved!
"""
# Clean up old data (programs that weren't grabbed)
for _, programs in self._epgdata.iteritems():
for programId in programs.keys():
if not programs[programId].has_key("grabbed") or \
not programs[programId]["grabbed"]:
del programs[programId]
# Write dictionary to disk
databaseFile = os.path.join(GRABBERDIR, self.databaseFile)
dbFile = open(databaseFile, "w")
cPickle.dump(self._epgdata, dbFile)
dbFile.close()
def grabEpg(self):
"""
This function will grab the EPG data from the EPG server.
If an existing database file was loaded, that data will be updated.
"""
# Report settings to user
print "Grabbing EPG using the following settings:"
print "Server to download from: %s" % self.epgServer
print "Number days of to grab : %s" % self.maxDays
print "Detailed program info : %s" % ("Yes" if self.details else "No")
print "Download channel logo : %s" % ("Yes" if self.downloadlogo else "No")
print "Writing XMLTV file to : %s" % self.xmltvFile
print "Using database file : %s" % self.databaseFile
print "Grabbing EPG for %d channels." % len(self.channelDict)
print ""
# Grab EPG data for all days
for grabDay in range(self.maxDays):
for dayPart in range(0, 8):
grabDate = date.today() + timedelta(days=grabDay)
print "Grabbing", str(grabDate), "part", dayPart,
print "(day " + str(grabDay+1) + "/" + str(self.maxDays) + ")"
try:
# Set up new connection to EPG server
self._epgConnection = httplib.HTTPConnection(self.epgServer)
# Get basic EPG
fileId = grabDate.strftime("%Y%m%d.") + str(dayPart)
requestUrl = "/epgdata/epgdata." + fileId + ".json.gz"
try:
self._epgConnection.request("GET", requestUrl)
response = self._epgConnection.getresponse()
epgData = response.read()
response.close()
if response.status != 200:
print "HTTP Error %s (%s). Failed on fileid %s." % (response.status,
response.reason,
fileId)
break # break loop, no more days
except socket.error, error:
print "Failed to download '" + fileId + "'"
print "The error was:", error
return False # Return with error
except httplib.CannotSendRequest, error:
print "Error occurred on HTTP connection. Connection lost before sending request."
print "The error was:", error
return False # Return with error
except httplib.BadStatusLine, error:
print "Error occurred on HTTP connection. Bad status line returned."
print "The error was:", error
return False # Return with error
# Decompress and retrieve data
compressedStream = StringIO.StringIO(epgData)
rawData = gzip.GzipFile(fileobj=compressedStream).read()
basicEpg = json.loads(rawData, "UTF-8")
# Close StringIO
compressedStream.close()
# Process basic EPG
self._processBasicEPG(basicEpg)
finally:
# Make sure connection gets closed
self._epgConnection.close()
self._epgConnection = None
return True # Return with success
def writeXmltv(self):
"""
This function will write the current in-memory EPG data to an XMLTV file.
NOTE: Programs not found in the downloaded EPG will not be saved!
"""
# Set up XML tree and create main <TV> tag
self._xmltv = etree.Element("tv",
attrib = {"source-info-url" : self.epgServer,
"source-info-name" : "Local amino EPG server",
"generator-info-name" : "AminoEPGGrabber %s (C) 2012 Jeroen Bogers" % VERSION,
"generator-info-url" : "http://gathering.tweakers.net"}
)
# Add channels to XML
for channel in sorted(self._epgdata.keys()):
channelTag = etree.Element("channel", id = channel)
channelDisplayNameTag = etree.Element("display-name", lang = "nl")
if self.channelDict.has_key(channel):
channelDisplayNameTag.text = self.channelDict[channel]
else:
channelDisplayNameTag.text = channel
channelTag.append(channelDisplayNameTag)
# Add icon link, if available
if self._foundLogos.has_key(channel):
logoLink = "file://%s" % self._foundLogos[channel]
channelIconTag = etree.Element("icon", src = logoLink)
channelTag.append(channelIconTag)
self._xmltv.append(channelTag)
# Add programs to XML
for channel, programs in sorted(self._epgdata.items()):
for _, program in sorted(programs.items()):
self._xmltv.append(self._getProgramAsElement(channel, program))
# Write XMLTV file to disk
xmltvFile = os.path.join(GRABBERDIR, self.xmltvFile)
outFile = open(xmltvFile, "w")
outFile.write(etree.tostring(self._xmltv, pretty_print = True, xml_declaration = True, encoding='UTF-8'))
outFile.close()
#===============================================================================
# Private functions
#===============================================================================
def _processBasicEPG(self, basicEpg):
"""
Takes the loaded EPG data and converts it to the in-memory
structure. If the program is not in memory, or differs from
the in memory data, the details are retrieved.
"""
for channel, grabbedPrograms in basicEpg.iteritems():
# Ignore channels not in the channel list (if given)
if len(self.channelDict) > 0 and not self.channelDict.has_key(channel):
continue
# Check if data for channel is loaded yet
if not self._epgdata.has_key(channel):
self._epgdata[channel] = dict()
# Check if channel icon needs to be downloaded
if self.downloadlogo:
self._getLogo(channel)
# Store all program data
for grabbedProgram in grabbedPrograms:
# Convert to internal structure
try:
programId = grabbedProgram["id"]
program = dict()
program["grabbed"] = True
program["starttime"] = self._convertTimestamp(grabbedProgram["start"])
program["stoptime"] = self._convertTimestamp(grabbedProgram["end"])
program["title"] = grabbedProgram["name"]
except KeyError:
# Program with incomplete data (most likely missing 'name').
# Cannot create valid XMLTV entry, so skip (data will be updated on a next run when it is available)
continue
# Add every program to the internal data structure
if self._epgdata[channel].has_key(programId):
# Existing program, verify it has not been changed
stored = self._epgdata[channel][programId]
if stored["starttime"] == program["starttime"] and \
stored["stoptime"] == program["stoptime"] and \
stored["title"] == program["title"]:
# Mark stored program as 'grabbed' and skip to next
stored["grabbed"] = True
continue
else:
# Changed program, remove from storage and grab new data
del self._epgdata[channel][programId]
# New program or program with changes, get details
if self.details:
self._grabDetailedEPG(programId, program)
# Add program to internal storage
self._epgdata[channel][programId] = program
def _grabDetailedEPG(self, programId, program):
"""Download the detailed program data for the specified program"""
# Generate details URL
programIdGroup = programId[-2:]
detailUrl = "/epgdata/" + programIdGroup + "/" + programId + ".json"
# Try to download file
try:
self._epgConnection.request("GET", detailUrl)
response = self._epgConnection.getresponse()
if response.status != 200:
response.read() # Force response buffer to be emptied
response.close()
return # No data can be downloaded, return
except (socket.error, httplib.CannotSendRequest, httplib.BadStatusLine):
# Error in connection. Close existing connection.
self._epgConnection.close()
# Wait for network to recover
time.sleep(10)
# Reconnect to server and retry
try:
self._epgConnection = httplib.HTTPConnection(self.epgServer)
self._epgConnection.request("GET", detailUrl)
response = self._epgConnection.getresponse()
if response.status != 200:
response.read() # Force response buffer to be emptied
response.close()
return # No data can be downloaded, return
except (socket.error, httplib.CannotSendRequest, httplib.BadStatusLine):
# Connection remains broken, return (error will be handled in grabEpg function)
return
detailEpg = json.load(response, "UTF-8")
response.close()
# Episode title
if detailEpg.has_key("episodeTitle") and len(detailEpg["episodeTitle"]) > 0:
program["sub-title"] = detailEpg["episodeTitle"]
# Detailed description
if detailEpg.has_key("description") and len(detailEpg["description"]) > 0:
program["desc"] = detailEpg["description"]
# Credits
program["credits"] = dict()
if detailEpg.has_key("actors") and len(detailEpg["actors"]) > 0:
program["credits"]["actor"] = []
for actor in detailEpg["actors"]:
program["credits"]["actor"].append(actor)
if detailEpg.has_key("directors") and len(detailEpg["directors"]) > 0:
program["credits"]["director"] = []
for director in detailEpg["directors"]:
program["credits"]["director"].append(director)
if detailEpg.has_key("presenters") and len(detailEpg["presenters"]) > 0:
program["credits"]["presenter"] = []
for presenter in detailEpg["presenters"]:
program["credits"]["presenter"].append(presenter)
if detailEpg.has_key("commentators") and len(detailEpg["commentators"]) > 0:
program["credits"]["commentator"] = []
for presenter in detailEpg["commentators"]:
program["credits"]["commentator"].append(presenter)
# Genres
if detailEpg.has_key("genres") and len(detailEpg["genres"]) > 0:
program["categories"] = []
for genre in detailEpg["genres"]:
program["categories"].append(genre)
# Aspect ratio
if detailEpg.has_key("aspectratio") and len(detailEpg["aspectratio"]) > 0:
program["aspect"] = detailEpg["aspectratio"]
# TODO: NICAM ratings (nicamParentalRating and nicamWarning)
def _getProgramAsElement(self, channel, program):
"""Returns the specified program as an LXML 'Element'"""
# Construct programme tag
programmeTag = etree.Element("programme",
start = program["starttime"],
stop = program["stoptime"],
channel = channel)
# Construct title tag
titleTag = etree.Element("title", lang = "nl")
titleTag.text = program["title"]
programmeTag.append(titleTag)
# Subtitle
if program.has_key("sub-title"):
# Add sub-title tag
subtitleTag = etree.Element("sub-title", lang = "nl")
subtitleTag.text = program["sub-title"]
programmeTag.append(subtitleTag)
# Description
if program.has_key("desc"):
# Add desc tag
descriptionTag = etree.Element("desc", lang = "nl")
descriptionTag.text = program["desc"]
programmeTag.append(descriptionTag)
# Credits (directors, actors, etc)
if program.has_key("credits") and len(program["credits"]) > 0:
# Add credits tag
creditsTag = etree.Element("credits")
# Add tags for each type of credits (in order, so XMLTV stays happy)
#creditTypes = ["director", "actor", "writer", "adapter",
# "producer", "composer", "editor", "presenter",
# "commentator", "guest"]
creditTypes = ["director", "actor", "presenter", "commentator"]
creditsDict = program["credits"]
for creditType in creditTypes:
if creditsDict.has_key(creditType):
for person in creditsDict[creditType]:
personTag = etree.Element(creditType)
personTag.text = person
creditsTag.append(personTag)
programmeTag.append(creditsTag)
# Categories
if program.has_key("categories"):
# Add multiple category tags
for category in program["categories"]:
categoryTag = etree.Element("category", lang = "nl")
categoryTag.text = category
programmeTag.append(categoryTag)
# Aspect ratio
if program.has_key("aspect"):
# Add video tag, containing aspect tag
videoTag = etree.Element("video")
aspectTag = etree.Element("aspect")
aspectTag.text = program["aspect"]
videoTag.append(aspectTag)
programmeTag.append(videoTag)
return programmeTag
def _convertTimestamp(self, timestamp):
"""Convert downloaded timestamp to XMLTV compatible time string"""
startTime = datetime.fromtimestamp(timestamp, self._timezone)
return startTime.strftime("%Y%m%d%H%M%S %z")
def _getLogo(self, channel):
"""Check if there is a logo for the given channel, and (try) to download it if needed"""
# Check that log has not been verified already
if self._foundLogos.has_key(channel):
return
# Prepare paths needed for the logo
if self.logoStore is not None:
localLogoDir = os.path.join(GRABBERDIR, self.logoStore)
else:
localLogoDir = os.path.join(GRABBERDIR, "logos")
logoName = "%s.png" % channel
localLogo = os.path.join(localLogoDir, logoName)
remoteLogo = "/tvmenu/images/channels/%s.png" % channel
# Check that logo does not already exist
if os.path.isfile(localLogo):
# Found logo, store and return
self._foundLogos[channel] = localLogo
return
# Logo not found, try to download it
try:
self._epgConnection.request("GET", remoteLogo)
response = self._epgConnection.getresponse()
if response.status != 200:
# Logo cannot be found, set to ignore it
self._foundLogos[channel] = None
response.read() # Force response buffer to be emptied
response.close()
return
except (socket.error, httplib.CannotSendRequest, httplib.BadStatusLine):
# Error in connection. Close existing connection.
self._epgConnection.close()
# Wait for network to recover
time.sleep(10)
# Reconnect to server and retry
try:
self._epgConnection = httplib.HTTPConnection(self.epgServer)
self._epgConnection.request("GET", remoteLogo)
response = self._epgConnection.getresponse()
if response.status != 200:
# Logo cannot be found, set to ignore it
self._foundLogos[channel] = None
response.read() # Force response buffer to be emptied
response.close()
return
except (socket.error, httplib.CannotSendRequest, httplib.BadStatusLine):
# Connection remains broken, return (error will be handled in grabEpg function)
self._foundLogos[channel] = None
return
# Logo downloaded, store to disk
try:
if not os.path.isdir(localLogoDir):
os.makedirs(localLogoDir)
with open(localLogo, "wb") as logoFile:
logoFile.write(response.read())
response.close()
self._foundLogos[channel] = localLogo
except EnvironmentError:
# Could not store logo, set to ignore it
self._foundLogos[channel] = None
def main():
"""
Main entry point of program.
This function will read the configuration file and start the grabber.
"""
print "AminoEPGGrabber %s started on %s." % (VERSION, datetime.now())
# Create grabber class
grabber = AminoEPGGrabber()
# Try to load config file, if it exists
configFile = os.path.join(GRABBERDIR, "config.xml")
if os.path.isfile(configFile):
grabber.loadConfig(configFile)
# Load saved database
grabber.loadDatabase()
# Grab EPG from IPTV network
grabber.grabEpg()
# Write database
grabber.writeDatabase()
# Write XMLTV file
grabber.writeXmltv()
print "AminoEPGGrabber finished on %s." % datetime.now()
if __name__ == "__main__":
main()
| gpl-3.0 | 2,449,852,546,226,054,700 | 41.109399 | 128 | 0.501358 | false |
jkoelker/investing | picloud/magicformula/predict.py | 1 | 3948 | #!/usr/bin/env python
import argparse
import sys
import MySQLdb
import pandas as pd
import twitter
def publish_to_twitter(df, prefix='MF', api=None, **kwargs):
if api is None:
api = twitter.Api(**kwargs)
msg = ' '.join(['$%s' % s for s in df.T.index])
msg = '%s: %s' % (prefix, msg)
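    # e.g. with prefix 'MF' and the (hypothetical) tickers AAPL and MSFT as
    # columns of the transposed frame, msg now reads "MF: $AAPL $MSFT"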
if len(msg) > 140:
return publish_to_twitter(df[:-1], prefix, api, **kwargs)
return api.PostUpdate(msg)
def rank_stocks(df):
roa_key = 'roa_ttm'
pe_key = 'trailing_pe'
roa_rank = 'return_rank'
pe_rank = 'pe_rank'
df[pe_rank] = df[pe_key].rank(method='min')
df[roa_rank] = df[roa_key].rank(method='min', ascending=0)
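    # Ranking sketch: method='min' assigns ties the smallest rank; the lowest
    # trailing_pe gets pe_rank 1 and, with ascending=0, the highest roa_ttm
    # gets return_rank 1, so the sort below favours cheap, high-return stocks.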
return df.sort_index(by=[pe_rank, roa_rank], ascending=[1, 1])
def get_stocks(db_kwargs):
qry = """
SELECT t.ticker, f.*, MAX(f.refresh_dt)
FROM fundamentals f, tickers t
WHERE f.ticker_id = t.id
AND f.refresh_dt BETWEEN DATE_SUB(NOW(), INTERVAL 1 WEEK) AND NOW()
AND t.sector NOT IN ('Financial', 'Utilities')
AND t.industry NOT IN ('Independent Oil & Gas',
'Major Integrated Oil & Gas',
                           'Oil & Gas Drilling & Exploration',
'Oil & Gas Equipment & Services',
'Oil & Gas Pipelines',
'Oil & Gas Refining & Marketing')
AND f.roa_ttm >= 0.25
AND f.trailing_pe >= 5
AND f.market_cap >= 30000000
GROUP BY f.ticker_id
"""
conn = MySQLdb.connect(**db_kwargs)
df = pd.read_sql(qry, conn, index_col='ticker')
conn.close()
return df
def predict(num_stocks, db_kwargs, twitter_kwargs):
stocks = get_stocks(db_kwargs)
rank = rank_stocks(stocks)
return publish_to_twitter(rank[:num_stocks].T, **twitter_kwargs)
def main():
parser = argparse.ArgumentParser(description='Run MagicFormula Prediction',
add_help=False)
parser.add_argument('-k', '--consumer-key',
required=True,
help='Twitter application consumer key')
parser.add_argument('-s', '--consumer-secret',
required=True,
help='Twitter application consumer secret')
parser.add_argument('-K', '--access-token-key',
required=True,
help='Twitter User access token key')
parser.add_argument('-S', '--access-token-secret',
required=True,
help='Twitter User access token secret')
parser.add_argument('-n', '--num_stocks',
default=15,
type=int,
help='Number of stocks to publish')
parser.add_argument('-h', '--host',
required=True,
help='MySQL host')
parser.add_argument('-u', '--user',
required=True,
help='MySQL User')
parser.add_argument('-p', '--password',
required=True,
help='MySQL password')
parser.add_argument('database',
help='Database to store tickers in')
parser.add_argument('--help',
action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
args = parser.parse_args()
db_kwargs = {'host': args.host,
'user': args.user,
'passwd': args.password,
'db': args.database}
twitter_kwargs = {'consumer_key': args.consumer_key,
'consumer_secret': args.consumer_secret,
'access_token_key': args.access_token_key,
'access_token_secret': args.access_token_secret}
if predict(args.num_stocks, db_kwargs, twitter_kwargs):
return 0
return 1
if __name__ == '__main__':
sys.exit(main())
| mit | 5,124,949,464,170,065,000 | 32.457627 | 79 | 0.526342 | false |
mucximilian/gimpmaps | gimprenderer/sketching/geometry.py | 1 | 17067 | '''
Created on Jun 11, 2015
@author: mucx
# TO DO:
- Adding classes:
- Point?
- Polygon
- Multipolygon support
'''
from __future__ import division
from abc import ABCMeta, abstractmethod
import math
import sys
class Geometry(object):
"""
An abstract class defining the base geometry object
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
Constructor
"""
class Line(Geometry):
"""
An abstract class defining the connection between points
"""
__metaclass__ = ABCMeta
def __init__(self, coordinates):
"""
        :param coordinates: A list of line point coordinate tuples.
"""
# Check that line consists only of two points
if len(coordinates) > 2:
print coordinates
sys.exit("Too many points for simple line - interrupted.")
else:
self.coords = coordinates
def as_wkt(self, line):
"""
        Returns the line as a WKT LINESTRING string.
"""
line_wkt = "LINESTRING ("
for p in self.coords:
line_wkt += str(p[0]) + " " + str(p[1]) + ", "
line_wkt = line_wkt[:-2] + ")"
return line_wkt
@abstractmethod
def length(self):
raise NotImplementedError
class LineSimple(Line):
"""
    A class defining the straight connection between two points. The point
    that is closer to the origin is the first point.
"""
def __init__(self, coordinates):
"""
:param coordinates: A list of coordinate tuples
"""
super(LineSimple, self).__init__(coordinates)
def length(self):
"""
Calculates the distance between the two line points using the
        Pythagorean theorem.
"""
d_x = math.fabs(self.coords[0][0] - self.coords[1][0])
d_y = math.fabs(self.coords[0][1] - self.coords[1][1])
l = math.sqrt(d_x**2 + d_y**2)
return l
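        # Quick check: the points (0, 0) and (3, 4) give d_x = 3, d_y = 4
        # and therefore a length of 5.0.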
def vector(self):
x = self.coords[1][0] - self.coords[0][0]
y = self.coords[1][1] - self.coords[0][1]
return (x, y)
def vector_orthogonal(self):
"""
Calculates an orthogonal vector to the line using the dot product.
Two vectors are orthogonal when their dot product is zero.
"""
v1 = self.vector()
v2 = None
try:
v2_y = -v1[0] / v1[1]
v2 = (1, v2_y)
except ZeroDivisionError:
v2 = (0, 1)
return v2
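        # Quick check: for the line vector (2, 4) this returns (1, -0.5);
        # the dot product 2*1 + 4*(-0.5) is 0, so the vectors are orthogonal.
        # A horizontal line (y component 0) falls back to (0, 1).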
def get_delta(self):
"""
Returns the x or y distance between the two line points based on the
equation parameter (which determines the ascent of the line)
"""
delta = None
eq_params = self.get_line_equation_params()
if eq_params is not None:
delta = self.coords[0][0] - self.coords[1][0] # delta x
else:
delta = self.coords[0][1] - self.coords[1][1] # delta y
return delta
def get_line_equation_params(self):
"""
Identifies the line equation y = mx + b for a line which is determined
by two points.
        Returns [m, b], or None if the line is vertical.
"""
x1 = self.coords[0][0]
y1 = self.coords[0][1]
x2 = self.coords[1][0]
y2 = self.coords[1][1]
delta_x = x1 - x2
delta_y = y1 - y2
if (delta_x == 0):
return None # Vertical line
else:
m = (delta_y)/(delta_x)
b = y1 - m * x1
return [m,b]
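        # Quick check: the points (0, 0) and (2, 4) give m = 2 and b = 0,
        # i.e. y = 2x; a vertical line returns None instead.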
def point_at_line_pos(self, p, reverse = False):
"""
        Calculating the point at the position p * AB on the line from point A
        to point B.
        :param p: Relative position between A and B (0 is at A, 0.5 middle, 1 is at B)
:param reverse: False, position between A and B, True between B and A.
Default is False
"""
a = self.coords[0]
b = self.coords[1]
p1 = None
p2 = None
if reverse:
p1 = b
p2 = a
else:
p1 = a
p2 = b
x = (1-p) * p1[0] + p * p2[0];
y = (1-p) * p1[1] + p * p2[1];
return (x,y)
def point_orthogonal(self, pos, d):
"""
        Displaces a point P which is located on the line at a relative
        position pos between A and B orthogonally by a distance d.
:param pos: Relative position of the point between A and B (0...1)
:param d: Distance the point is displaced orthogonally
"""
p = self.point_at_line_pos(pos)
v = self.vector_orthogonal()
shift = [(p[0], p[1]), (v[0] + p[0], v[1] + p[1])]
shift_line = LineSimple(shift)
p_displaced = shift_line.point_shifted(d)
return p_displaced
def point_shifted(self, d):
"""
        Computes the point on the straight line through A and B that lies at
        the distance d from A in the direction of B.
        :param d: Distance from A along the line (negative values extend beyond A).
"""
line_vector = self.vector()
length = self.length()
shift = tuple((d / length) * x for x in line_vector)
point_shifted = tuple(sum(t) for t in zip(self.coords[0], shift))
return point_shifted
def line_scale(self, d_abs = None, d_rel = None):
"""
Equally scaling (extending or shortening at both endpoints) the line
either with using a relative or absolute value. Returns the new
endpoints as a tuple.
        :param d_abs: Absolute distance by which the line is extended (or
            shortened, if negative) at each endpoint.
        :param d_rel: The same distance given as a fraction of the line length.
"""
d = 0
if (d_abs is not None and d_rel is None):
d = d_abs
elif (d_rel is not None and d_abs is None):
d = d_rel * self.length()
else:
d = d_abs
print "Two d values provied for line scaling - absolute value used"
a_new = self.point_shifted(-d)
# Using reversed line coordinates
coords_reversed = self.coords[::-1]
line_reversed = LineSimple(coords_reversed)
b_new = line_reversed.point_shifted(-d)
return (a_new, b_new)
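# Illustrative usage sketch (added for clarity, not part of the original
# module); the coordinates below are invented. It shows the midpoint of a
# segment being displaced orthogonally, e.g. to add a hand-drawn wobble.
if __name__ == "__main__":
    _demo_line = LineSimple([(0.0, 0.0), (10.0, 0.0)])
    print _demo_line.length()                    # 10.0
    print _demo_line.point_at_line_pos(0.5)      # (5.0, 0.0)
    print _demo_line.point_orthogonal(0.5, 2.0)  # (5.0, 2.0)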
class LineString(Line):
def __init__(self, coordinates):
"""
:param coordinates: A list of coordinate tuples
"""
self.coords = coordinates
self.curve = None
def length(self):
length_total = 0
for i in range(1, len(self.coords)):
line = LineSimple([self.coords[i], self.coords[i - 1]])
length_total += line.length()
return length_total
def simple_bezier(self, t = 1.0):
"""
Returns a Bezier curve in SVG from a sequence of points and control
points in an array.
"""
def get_controlpoints(point_triple, t = 1.0):
"""
Given three consecutive points on a line (P0, P1, P2), this function
calculates the Bezier control points of P1 using the technique
explained by Rob Spencer.
Source: http://scaledinnovation.com/analytics/splines/aboutSplines.html
"""
x0 = point_triple[0][0]
y0 = point_triple[0][1]
x1 = point_triple[1][0]
y1 = point_triple[1][1]
x2 = point_triple[2][0]
y2 = point_triple[2][1]
d01 = math.sqrt(math.pow(x1 - x0, 2) + math.pow(y1 - y0, 2))
d12 = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))
fa = t * d01 / (d01 + d12) # scaling factor for triangle Ta
fb = t * d12 / (d01 + d12) # ditto for Tb, simplifies to fb=t-fa
p1x = x1 - fa * (x2 - x0) # x2-x0 is the width of triangle T
p1y = y1 - fa * (y2 - y0) # y2-y0 is the height of T
p2x = x1 + fb * (x2 - x0)
p2y = y1 + fb * (y2 - y0)
return [[p1x,p1y],[p2x,p2y]];
########################################################################
controlpoints = []
controlpoints.append([self.coords[0][0], self.coords[0][1]])
for i in range(1, len(self.coords)-1):
point_triple = [self.coords[i-1], self.coords[i], self.coords[i+1]]
cps_point = get_controlpoints(point_triple, t)
controlpoints.append([cps_point[0][0], cps_point[0][1]])
controlpoints.append([cps_point[1][0], cps_point[1][1]])
last = len(self.coords)-1
controlpoints.append([self.coords[last][0], self.coords[last][1]])
        curve = self.get_curve(controlpoints)
self.curve = curve
return curve
def catmull_rom_bezier(self, t = 1.0):
"""
Returns a SVG Bezier curve of a line with the given points.
Source: http://schepers.cc/getting-to-the-point
Catmull-Rom to Cubic Bezier conversion matrix
0 1 0 0
-1/6 1 1/6 0
0 1/6 1 -1/6
0 0 1 0
"""
controlpoints = []
point_count = len(self.coords)
for i in range(0, point_count-1):
# Creating an array of relevant knot points
p = []
if ( 0 == i ):
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
p.append([self.coords[i+2][0], self.coords[i+2][1]])
elif (len(self.coords) - 2 == i ):
p.append([self.coords[i-1][0], self.coords[i-1][1]])
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
else:
p.append([self.coords[i-1][0], self.coords[i-1][1]])
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
p.append([self.coords[i+2][0], self.coords[i+2][1]])
# Calculating the bezier points from the knot points
bp = [];
# This assignment is for readability only
            x0 = p[0][0]
            y0 = p[0][1]
            x1 = p[1][0]
            y1 = p[1][1]
            x2 = p[2][0]
            y2 = p[2][1]
            x3 = p[3][0]
            y3 = p[3][1]
# Using the factor t as "tension control"
f = (1 / t) * 6
bp.append([x1, y1])
bp.append([
((-x0 + f*x1 + x2) / f),
((-y0 + f*y1 + y2) / f)
])
bp.append([
((x1 + f*x2 - x3) / f),
((y1 + f*y2 - y3) / f)
])
bp.append([x2, y2])
controlpoints.append([bp[1][0], bp[1][1]])
controlpoints.append([bp[2][0], bp[2][1]])
#print controlpoints
curve = self.get_curve(controlpoints)
self.curve = curve
return curve
def get_curve(self, cps):
"""
Creates a coordinate array of points and control points that can be
used as a SVG path.
:param cps: An array of control points coordinates.
"""
# Checking every linepoint after the start point for two control points
if (len(self.coords) - 1) != (len(cps) / 2):
print "coords: " + str(len(self.coords))
print "cps: " + str(len(cps))
sys.exit("Curve cannot be created - control point error:")
else:
# Adding first point
curve = [self.coords[0]]
# Adding remaining points
for i in range(0, len(self.coords) -1):
cp_pos = i * 2
curve.append(cps[cp_pos])
curve.append(cps[cp_pos + 1])
curve.append(self.coords[i + 1])
return curve
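# Illustrative usage sketch (added for clarity, not part of the original
# module); the knot points are invented. catmull_rom_bezier() returns the
# original points interleaved with two Bezier control points per segment,
# i.e. [P0, c1, c2, P1, c3, c4, P2, ...], ready to be written as cubic "C"
# segments of an SVG path.
if __name__ == "__main__":
    _demo_path = LineString([(0.0, 0.0), (10.0, 5.0), (20.0, 0.0), (30.0, 10.0)])
    print _demo_path.length()
    print _demo_path.catmull_rom_bezier(t=1.0)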
class Polygon(object):
"""
Classdocs
"""
def __init__(self, linearrings):
"""
:param coordinates: A list of coordinate tuples
"""
self.linearrings = linearrings
def disjoin(self, angle_disjoin = 120.0):
"""
        Disjoins the polygon linearrings into segments at vertices where the
        angle between the lines from the vertex to the vertex behind and the
        vertex to the vertex ahead falls below a given threshold. Returns the
        calculated line segments as an array.
        :param angle_disjoin: Threshold angle for the disjoin, in degrees.
"""
def three_point_angle(points):
"""
Calculates the angle between the lines from a vertex to the vertex
behind and the vertex to the vertex ahead.
:param points: Coordinate array, containing three points
(vertex behind, vertex, vertex ahead)
"""
angle = 0
try:
p0 = points[0] # point_behind
p1 = points[1] # point_center
p2 = points[2] # point_ahead
a = (p1[0] - p0[0])**2 + (p1[1] - p0[1])**2
b = (p1[0] - p2[0])**2 + (p1[1] - p2[1])**2
c = (p2[0] - p0[0])**2 + (p2[1] - p0[1])**2
angle = math.acos((a + b - c) / math.sqrt(4 * a * b)) * 180/math.pi
"""
# Determine whether the edges are convex or concave
v1 = LineSimple([p0, p1]).vector()
v2 = LineSimple([p1, p2]).vector()
det = v1[0]*v2[1] - v2[0]*v1[1] # det is negative if concave
if det < 0:
angle = 360 - angle
Nice but useless...
"""
except ZeroDivisionError:
print "Angle is zero...probably duplicate points"
return angle
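        # Note (added for clarity): with this law-of-cosines formulation a
        # vertex on a nearly straight run yields an angle close to 180
        # degrees, while a sharp corner yields a small angle, so the outline
        # below is split wherever the angle drops to angle_disjoin or less.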
########################################################################
outline_segments = []
# Get linearrings of multipolygons
for linearring in self.linearrings:
segment = []
segment.append(linearring[0])
# Iterate over all points of linearring
for i in range(1, len(linearring) -1):
points = []
points.append(linearring[i - 1])
points.append(linearring[i])
points.append(linearring[i + 1])
angle = three_point_angle(points)
# Check if duplicate points exist (due to coordinate rounding)
if (angle == 0):
# Skipping duplicate points
if linearring[i] == linearring[i + 1]:
continue
if linearring[i] == linearring[i - 1]:
continue
# Continue segment
if (angle > angle_disjoin):
segment.append(linearring[i])
# Finish segment and create new one
else:
segment.append(linearring[i])
outline_segments.append(segment)
segment = []
segment.append(linearring[i])
segment.append(linearring[0])
outline_segments.append(segment)
return outline_segments | gpl-2.0 | 3,766,000,800,359,044,000 | 30.089253 | 84 | 0.457315 | false |
chrsrds/scikit-learn | examples/model_selection/plot_grid_search_digits.py | 7 | 2706 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
SVC(), tuned_parameters, scoring='%s_macro' % score
)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause | 537,184,166,535,805,440 | 33.692308 | 78 | 0.656689 | false |
lekston/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py | 1 | 38099 | #!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex, pickle
import shutil
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# list of ROMFS files
romfs = []
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
# build flags for ChibiOS makefiles
build_flags = []
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_mcu_lib(mcu):
'''get library file for the chosen MCU'''
import importlib
try:
return importlib.import_module(mcu)
except ImportError:
error("Unable to find module for MCU %s" % mcu)
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
lib = get_mcu_lib(mcu)
alt_map = lib.AltFunction_map
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'SDMMC', 'OTG', 'JT', 'TIM', 'CAN']
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
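# Illustrative example (added; the pin line is an assumption, not taken from
# this file): for a hwdef.dat line such as "PA9 USART1_TX USART1" this looks
# up "PA9:USART1_TX" in the MCU library's AltFunction_map and returns the
# alternate function number (AF7 for USART1 on typical STM32F4 parts).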
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in bytype.keys():
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX') or
self.label.endswith('_CTS'))):
# default RX/TX lines to pullup, to prevent spurious bytes
# on disconnected ports. CTS is the exception, which is pulldown
if self.label.endswith("CTS"):
v = "PULLDOWN"
else:
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
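# Illustrative example (added for clarity; the line below is an assumption,
# not taken from this file): a hwdef.dat pin definition such as
#   PA4 VDD_5V_SENS ADC1 SCALE(2)
# is parsed by process_line() below into
#   generic_pin('A', 4, 'VDD_5V_SENS', 'ADC1', ['SCALE(2)'])
# and the scale factor is later read back with extra_value('SCALE').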
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_config(name, column=0, required=True, default=None, type=None):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
ret = config[name][column]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
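# Illustrative example (added for clarity): once a hwdef.dat line such as
# "FLASH_SIZE_KB 2048" has been processed, get_config('FLASH_SIZE_KB',
# type=int) returns 2048; asking for a missing required key reports an
# error and exits.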
def enable_can(f):
'''setup for a CAN enabled board'''
f.write('#define HAL_WITH_UAVCAN 1\n')
env_vars['HAL_WITH_UAVCAN'] = '1'
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
f.write('#define %s\n\n' % get_config('MCU', 1))
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if have_type_prefix('SDIO') or have_type_prefix('SDMMC'):
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
build_flags.append('USE_FATFS=yes')
else:
f.write('#define HAL_USE_SDC FALSE\n')
build_flags.append('USE_FATFS=no')
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if have_type_prefix('CAN'):
enable_can(f)
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
f.write('#define CRT1_AREAS_NUMBER 1\n')
if mcu_type in ['STM32F427xx', 'STM32F407xx','STM32F405xx']:
def_ccm_size = 64
else:
def_ccm_size = None
ccm_size = get_config(
'CCM_RAM_SIZE_KB', default=def_ccm_size, required=False, type=int)
if ccm_size is not None:
f.write('#define CCM_RAM_SIZE %u\n' % ccm_size)
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
f.write('#define FLASH_LOAD_ADDRESS 0x%08x\n' % flash_reserve_start)
f.write('\n')
lib = get_mcu_lib(mcu_type)
build_info = lib.build
# setup build variables
for v in build_info.keys():
build_flags.append('%s=%s' % (v, build_info[v]))
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram size
ram_size = get_config('RAM_SIZE_KB', default=192, type=int)
flash_base = 0x08000000 + flash_reserve_start * 1024
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
print("Generating ldscript.ld")
f = open(fname, 'w')
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x20000000, len = %uk
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram_size))
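# Worked example (added for clarity; the numbers are illustrative): with
# FLASH_SIZE_KB 1024, FLASH_RESERVE_START_KB 16 and no reserve at the end,
# the generated script contains "flash : org = 0x08004000, len = 1008K",
# i.e. the application is linked just above the 16k reserved at the start
# of flash for the bootloader/storage.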
def copy_common_linkerscript(outdir, hwdef):
dirpath = os.path.dirname(hwdef)
shutil.copy(os.path.join(dirpath, "../common/common.ld"),
os.path.join(outdir, "common.ld"))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
        return
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default="%BOARD%"))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_TX_DMA_STREAM, STM32_SPI_SPI%u_RX_DMA_STREAM }\n'
% (n, n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def write_UART_config(f):
'''write UART config defines'''
get_config('UART_ORDER')
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
for dev in uart_list:
idx = uart_list.index(dev)
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
for idx in range(len(uart_list), 6):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s}\n" %
(dev, dev, rts_line))
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver:
f.write('#define HAL_USE_SERIAL FALSE\n')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('#define HAL_USE_I2C FALSE\n')
return
if not 'I2C_ORDER' in config:
error("Missing I2C_ORDER config")
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
if dev + "_SCL" in bylabel:
p = bylabel[dev + "_SCL"]
f.write(
'#define HAL_%s_SCL_AF %d\n' % (dev, p.af)
)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write(
'#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM }\n'
% (n, n, n, n))
f.write('#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
rc_in_int = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('RCININT'):
rc_in_int = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out:
print("No PWM output defined")
f.write('#define HAL_USE_PWM FALSE\n')
if rc_in is not None:
a = rc_in.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if chan_str[-1] == 'N':
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
chan_str = chan_str[:-1]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % rc_in.label)
if int(chan_str) not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
n = int(a[0][3:])
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write('#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % int(chan_str))
f.write('#define STM32_RCIN_DMA_STREAM STM32_TIM_TIM%u_CH%u_DMA_STREAM\n' % (n, int(chan_str)))
f.write('#define STM32_RCIN_DMA_CHANNEL STM32_TIM_TIM%u_CH%u_DMA_CHAN\n' % (n, int(chan_str)))
f.write('\n')
if rc_in_int is not None:
a = rc_in_int.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % rc_in.label)
n = int(a[0][3:])
f.write('// RC input config\n')
f.write('#define HAL_USE_EICU TRUE\n')
f.write('#define STM32_EICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCININT_EICU_TIMER EICUD%u\n' % n)
f.write('#define RCININT_EICU_CHANNEL EICU_CHANNEL_%u\n' % int(chan_str))
f.write('\n')
if alarm is not None:
a = alarm.label.split('_')
chan_str = a[1][2:]
timer_str = a[0][3:]
if not is_int(chan_str) or not is_int(timer_str):
error("Bad timer channel %s" % alarm.label)
n = int(timer_str)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan = int(chan_str)
if chan not in [1, 2, 3, 4]:
error("Bad channel number %u for ALARM PWM %s" % (chan, p))
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
alt_functions = [ 0, 0, 0, 0 ]
pal_lines = [ '0', '0', '0', '0' ]
for p in pwm_out:
if p.type != t:
continue
chan_str = p.label[7]
is_complementary = p.label[-1] == 'N';
if not is_int(chan_str):
error("Bad channel for PWM %s" % p)
chan = int(chan_str)
if chan not in [1, 2, 3, 4]:
error("Bad channel number %u for PWM %s" % (chan, p))
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
if is_complementary:
chan_mode[chan - 1] = 'PWM_COMPLEMENTARY_OUTPUT_ACTIVE_HIGH'
else:
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
alt_functions[chan - 1] = p.af
pal_lines[chan - 1] = 'PAL_LINE(GPIO%s, %uU)' % (p.port, p.pin)
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#ifdef STM32_TIM_TIM%u_UP_DMA_STREAM
# define HAL_PWM%u_DMA_CONFIG true, STM32_TIM_TIM%u_UP_DMA_STREAM, STM32_TIM_TIM%u_UP_DMA_CHAN
#else
# define HAL_PWM%u_DMA_CONFIG false, 0, 0
#endif\n''' % (n, n, n, n, n))
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u, \\
HAL_PWM%u_DMA_CONFIG, \\
{ %u, %u, %u, %u }, \\
{ %s, %s, %s, %s }}\n''' %
(group, advanced_timer,
chan_list[0], chan_list[1], chan_list[2], chan_list[3],
pwm_clock, period,
chan_mode[0], chan_mode[1], chan_mode[2], chan_mode[3],
n, n,
alt_functions[0], alt_functions[1], alt_functions[2], alt_functions[3],
pal_lines[0], pal_lines[1], pal_lines[2], pal_lines[3]))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
for (gpio, pwm, port, pin, p) in gpios:
f.write('#define HAL_GPIO_LINE_GPIO%u PAL_LINE(GPIO%s, %2uU)\n' % (gpio, port, pin))
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU), EXT_MODE_GPIO%s }, /* %s */ \\\n' %
(gpio, pwm, port, pin, port, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
for l in sorted(bylabel.keys()):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def write_ROMFS(outdir):
'''create ROMFS embedded header'''
from embed import create_embedded_h
create_embedded_h(os.path.join(outdir, 'ap_romfs_embedded.h'), romfs)
def write_prototype_file():
'''write the prototype file for apj generation'''
pf = open(os.path.join(outdir, "apj.prototype"), "w")
pf.write('''{
"board_id": %s,
"magic": "PX4FWv1",
"description": "Firmware for the %s board",
"image": "",
"build_time": 0,
"summary": "PX4FMUv3",
"version": "0.1",
"image_size": 0,
"git_identity": "",
"board_revision": 0
}
''' % (get_config('APJ_BOARD_ID'),
get_config('APJ_BOARD_TYPE', default=mcu_type)))
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
f.write('#define STM32_SERIAL_USE_%-6s TRUE\n' % type)
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
''')
write_mcu_config(f)
write_USB_config(f)
write_I2C_config(f)
write_SPI_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_peripheral_enable(f)
write_prototype_file()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default='TIM* SPI*'),
dma_noshare=get_config('DMA_NOSHARE',default=''))
write_PWM_config(f)
write_UART_config(f)
if len(romfs) > 0:
f.write('#define HAL_HAVE_AP_ROMFS_EMBEDDED_H 1\n')
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
peripherals.append(type + "_TX")
peripherals.append(type + "_RX")
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO') or type.startswith('SDMMC'):
peripherals.append(type)
if type.startswith('TIM'):
if p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
elif not p.has_extra('ALARM') and not p.has_extra('RCININT'):
# get the TIMn_UP DMA channels for DShot
label = type + '_UP'
if not label in peripherals:
peripherals.append(label)
done.add(type)
return peripherals
def write_env_py(filename):
'''write out env.py for environment variables to control the build process'''
# see if board has a defaults.parm file
defaults_filename = os.path.join(os.path.dirname(args.hwdef), 'defaults.parm')
if os.path.exists(defaults_filename):
print("Adding defaults.parm")
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_filename)
# CHIBIOS_BUILD_FLAGS is passed to the ChibiOS makefile
env_vars['CHIBIOS_BUILD_FLAGS'] = ' '.join(build_flags)
pickle.dump(env_vars, open(filename, "wb"))
def process_line(line):
'''process one line of pin definition file'''
global allpins
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
print("WARNING: Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type
mcu_type = a[2]
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'ROMFS':
romfs.append((a[1],a[2]))
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
        # also remove all occurrences of defines in previous lines, if any
for line in alllines[:]:
if line.startswith('define') and a[1] in line:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
if a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list for peripherals for DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
write_ROMFS(outdir)
# copy the shared linker script into the build directory; it must
# exist in the same directory as the ldscript.ld file we generate.
copy_common_linkerscript(outdir, args.hwdef)
write_env_py(os.path.join(outdir, "env.py"))
| gpl-3.0 | 4,494,559,686,549,286,400 | 32.65636 | 118 | 0.526785 | false |
TresysTechnology/setools | setoolsgui/rolemodel.py | 1 | 2087 | # Copyright 2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette, QTextCursor
from setools.policyrep.exception import MLSDisabled
from .details import DetailsPopup
from .models import SEToolsTableModel
def role_detail(parent, role):
"""
Create a dialog box for role details.
Parameters:
parent The parent Qt Widget
role The role
"""
detail = DetailsPopup(parent, "Role detail: {0}".format(role))
types = sorted(role.types())
detail.append_header("Types ({0}): ".format(len(types)))
for t in types:
detail.append(" {0}".format(t))
detail.show()
class RoleTableModel(SEToolsTableModel):
"""Table-based model for roles."""
headers = ["Name", "Types"]
def data(self, index, role):
# There are two roles here.
# The parameter, role, is the Qt role
# The below item is a role in the list.
if self.resultlist and index.isValid():
row = index.row()
col = index.column()
item = self.resultlist[row]
if role == Qt.DisplayRole:
if col == 0:
return str(item)
elif col == 1:
return ", ".join(sorted(str(t) for t in item.types()))
elif role == Qt.UserRole:
# get the whole object
return item
| lgpl-2.1 | -2,426,512,159,329,464,300 | 28.814286 | 74 | 0.638237 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/gof/tests/test_toolbox.py | 2 | 2301 | from __future__ import absolute_import, print_function, division
from theano.gof.graph import Variable, Apply
from theano.gof.type import Type
from theano.gof.op import Op
from theano.gof.fg import FunctionGraph
from theano.gof.toolbox import NodeFinder
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __eq__(self, other):
return isinstance(other, MyType)
def MyVariable(name):
return Variable(MyType(name), None, None)
class MyOp(Op):
__props__ = ("nin", "name")
def __init__(self, nin, name):
self.nin = nin
self.name = name
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyType(self.name + "_R")()]
return Apply(self, inputs, outputs)
def __str__(self):
return self.name
sigmoid = MyOp(1, 'Sigmoid')
add = MyOp(2, 'Add')
dot = MyOp(2, 'Dot')
def inputs():
x = MyVariable('x')
y = MyVariable('y')
z = MyVariable('z')
return x, y, z
class TestNodeFinder:
def test_straightforward(self):
x, y, z = inputs()
e0 = dot(y, z)
e = add(add(sigmoid(x), sigmoid(sigmoid(z))), dot(add(x, y), e0))
g = FunctionGraph([x, y, z], [e], clone=False)
g.attach_feature(NodeFinder())
assert hasattr(g, 'get_nodes')
for type, num in ((add, 3), (sigmoid, 3), (dot, 2)):
if not len([t for t in g.get_nodes(type)]) == num:
raise Exception("Expected: %i times %s" % (num, type))
new_e0 = add(y, z)
assert e0.owner in g.get_nodes(dot)
assert new_e0.owner not in g.get_nodes(add)
g.replace(e0, new_e0)
assert e0.owner not in g.get_nodes(dot)
assert new_e0.owner in g.get_nodes(add)
for type, num in ((add, 4), (sigmoid, 3), (dot, 1)):
if not len([t for t in g.get_nodes(type)]) == num:
raise Exception("Expected: %i times %s" % (num, type))
| agpl-3.0 | -5,614,573,700,113,669,000 | 25.147727 | 73 | 0.571491 | false |
davidfischer/rpc4django | docs/conf.py | 2 | 6764 | # -*- coding: utf-8 -*-
#
# RPC4Django documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 17 14:31:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(BASE_DIR)
# Get __version__ without loading rpc4django module
ns = {}
version_path = os.path.join(BASE_DIR, "rpc4django/version.py")
with open(version_path, "r", encoding="utf-8") as version_file:
exec(version_file.read(), ns)
rpc4django_version = ns["__version__"]
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RPC4Django'
copyright = u'%d, the respective authors' % datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = rpc4django_version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'RPC4Djangodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RPC4Django.tex', u'RPC4Django Documentation',
u'David Fischer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause | -7,409,543,119,766,741,000 | 31.363636 | 80 | 0.713779 | false |
c-square/homework | Licență/Anul III/CN/gauss/scripts/deploy.py | 1 | 1156 | #! /usr/bin/env python
""" Deploys a .pth file in site-packages for easy importing """
import distutils.sysconfig
import os
def deploy():
"""Deploy gauss"""
site = distutils.sysconfig.get_python_lib()
pth = os.path.join(site, 'gauss.pth')
if os.path.exists(pth):
print("[i] Module already exists!") # pylint: disable=C0325
else:
dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        print(dirname)
try:
with open(pth, 'w') as stream:
stream.write(dirname)
except IOError:
# pylint: disable=C0325
print("[x] Please run this script with superuser privileges.")
return
print("[i] Testing module...") # pylint: disable=C0325
try:
import gauss # pylint: disable=W0612
except ImportError as exc:
# pylint: disable=C0325
print("Failed to deploy module! {}".format(exc))
else:
# pylint: disable=C0325
print("[i] Module was successfully installed!")
if __name__ == "__main__":
deploy()
| mit | 3,215,219,101,553,088,000 | 30.243243 | 77 | 0.553633 | false |
jolyonb/edx-platform | lms/djangoapps/discussion/rest_api/tests/test_api.py | 1 | 128040 | """
Tests for Discussion API internal interface
"""
import itertools
from datetime import datetime, timedelta
from urllib import urlencode
from urlparse import parse_qs, urlparse, urlunparse
import ddt
import httpretty
import mock
from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from opaque_keys.edx.locator import CourseLocator
from pytz import UTC
from rest_framework.exceptions import PermissionDenied
from common.test.utils import MockSignalHandlerMixin, disable_signal
from courseware.tests.factories import BetaTesterFactory, StaffFactory
from lms.djangoapps.discussion.rest_api import api
from lms.djangoapps.discussion.rest_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread,
get_thread_list,
update_comment,
update_thread
)
from lms.djangoapps.discussion.rest_api.exceptions import (
CommentNotFoundError, DiscussionDisabledError, ThreadNotFoundError,
)
from lms.djangoapps.discussion.rest_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
make_paginated_api_response
)
from lms.djangoapps.discussion.django_comment_client.tests.utils import ForumsEnableMixin
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.lib.exceptions import CourseNotFoundError, PageNotFoundError
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the xblock.
"""
course.tabs = [tab for tab in course.tabs if not tab.type == 'discussion']
modulestore().update_item(course, user_id)
def _discussion_disabled_course_for(user):
"""
Create and return a course with discussions disabled.
The user passed in will be enrolled in the course.
"""
course_with_disabled_forums = CourseFactory.create()
CourseEnrollmentFactory.create(user=user, course_id=course_with_disabled_forums.id)
_remove_discussion_tab(course_with_disabled_forums, user.id)
return course_with_disabled_forums
def _create_course_and_cohort_with_user_role(course_is_cohorted, user, role_name):
"""
Creates a course with the value of `course_is_cohorted`, plus `always_cohort_inline_discussions`
set to True (which is no longer the default value). Then 1) enrolls the user in that course,
2) creates a cohort that the user is placed in, and 3) adds the user to the given role.
Returns: a tuple of the created course and the created cohort
"""
cohort_course = CourseFactory.create(
cohort_config={"cohorted": course_is_cohorted, "always_cohort_inline_discussions": True}
)
CourseEnrollmentFactory.create(user=user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [user]
return [cohort_course, cohort]
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTest(ForumsEnableMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_course"""
@classmethod
def setUpClass(cls):
super(GetCourseTest, cls).setUpClass()
cls.course = CourseFactory.create(org="x", course="y", run="z")
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
get_course(self.request, _discussion_disabled_course_for(self.user).id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": unicode(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"following_thread_list_url": (
"http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True"
),
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTestBlackouts(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""
Tests of get_course for courses that have blackout dates.
"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTestBlackouts, self).setUp()
self.course = CourseFactory.create(org="x", course="y", run="z")
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_blackout(self):
# A variety of formats is accepted
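        # (ISO 8601 strings, short US-style date strings, epoch milliseconds, and datetime objects)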
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTopicsTest(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_xblock(self, topic_id, category, subcategory, **kwargs):
"""
Build a discussion xblock in self.course.
"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
query_list = [("course_id", unicode(self.course.id))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return urlunparse(("", "", path, "", urlencode(query_list), ""))
def get_course_topics(self):
"""
        Get course topics for self.course as self.request.user, generating
        absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(DiscussionDisabledError):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_xblock("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_xblock("courseware-1", "A", "1")
self.make_discussion_xblock("courseware-2", "A", "2")
self.make_discussion_xblock("courseware-3", "B", "1")
self.make_discussion_xblock("courseware-4", "B", "2")
self.make_discussion_xblock("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_xblock("courseware-1", "First", "A", sort_key="D")
self.make_discussion_xblock("courseware-2", "First", "B", sort_key="B")
self.make_discussion_xblock("courseware-3", "First", "C", sort_key="E")
self.make_discussion_xblock("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_xblock("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_xblock("courseware-6", "Second", "C")
self.make_discussion_xblock("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.make_discussion_xblock("courseware-1", "First", "Everybody")
self.make_discussion_xblock(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_xblock(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_xblock("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_xblock(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
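        # The plain student is in Cohort A, so the Cohort B, staff-only, and future-start
        # modules should be hidden (leaving no visible "Second" category at all).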
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
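        # Beta testers (placed in Cohort B here) should also see the module with a future
        # start date, since the course sets days_early_for_beta=3.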
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
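        # Staff should see everything: both cohorts' modules, staff-only content,
        # and the module with a future start date.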
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
def test_discussion_topic(self):
"""
Tests discussion topic details against a requested topic id
"""
topic_id_1 = "topic_id_1"
topic_id_2 = "topic_id_2"
self.make_discussion_xblock(topic_id_1, "test_category_1", "test_target_1")
self.make_discussion_xblock(topic_id_2, "test_category_2", "test_target_2")
actual = get_course_topics(self.request, self.course.id, {"topic_id_1", "topic_id_2"})
self.assertEqual(
actual,
{
"non_courseware_topics": [],
"courseware_topics": [
{
"children": [{
"children": [],
"id": "topic_id_1",
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_1",
"name": "test_target_1"
}],
"id": None,
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_1",
"name": "test_category_1"
},
{
"children":
[{
"children": [],
"id": "topic_id_2",
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_2",
"name": "test_target_2"
}],
"id": None,
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_2",
"name": "test_category_2"
}
]
}
)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetThreadListTest(ForumsEnableMixin, CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_thread_list"""
@classmethod
def setUpClass(cls):
super(GetThreadListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.course.cohort_config = {"cohorted": False}
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_thread_list([])
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
self.get_thread_list([], course=_discussion_disabled_course_for(self.user))
def test_empty(self):
self.assertEqual(
self.get_thread_list([], num_pages=0).data,
{
"pagination": {
"next": None,
"previous": None,
"num_pages": 0,
"count": 0
},
"results": [],
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["1"],
"commentable_ids": ["topic_x,topic_meow"]
})
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["6"],
"per_page": ["14"],
})
def test_thread_content(self):
self.course.cohort_config = {"cohorted": True}
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
source_threads = [
make_minimal_cs_thread({
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"commentable_id": "topic_x",
"username": self.author.username,
"user_id": str(self.author.id),
"title": "Test Title",
"body": "Test body",
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
"endorsed": True,
"read": True,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
}),
make_minimal_cs_thread({
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"username": self.author.username,
"user_id": str(self.author.id),
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"votes": {"up_count": 9},
"comments_count": 18,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
})
]
expected_threads = [
self.expected_thread_data({
"id": "test_thread_id_0",
"author": self.author.username,
"topic_id": "topic_x",
"vote_count": 4,
"comment_count": 6,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"has_endorsed": True,
"read": True,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
}),
self.expected_thread_data({
"id": "test_thread_id_1",
"author": self.author.username,
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"vote_count": 9,
"comment_count": 19,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
}),
]
expected_result = make_paginated_api_response(
results=expected_threads, count=2, num_pages=1, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list(source_threads).data,
expected_result
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
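        # Only a plain student in a cohorted course should have a group_id filter added to
        # the query; privileged roles are presumably allowed to see threads from all cohorts.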
actual_has_group = "group_id" in httpretty.last_request().querystring
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link="http://testserver/test_path?page=2", previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[],
count=0,
num_pages=3,
next_link="http://testserver/test_path?page=3",
previous_link="http://testserver/test_path?page=1"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link=None, previous_link="http://testserver/test_path?page=2"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3).data,
expected_result
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(PageNotFoundError):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": text_search_rewrite})
self.register_get_threads_search_response([], text_search_rewrite, num_pages=0)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
).data,
expected_result
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["10"],
"text": ["test search string"],
})
def test_following(self):
self.register_subscribed_threads_response(self.user, [], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
following=True,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/users/{}/subscribed_threads".format(self.user.id)
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
})
@ddt.data("unanswered", "unread")
def test_view_query(self, query):
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
view=query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
query: ["true"],
})
@ddt.data(
("last_activity_at", "activity"),
("comment_count", "comments"),
("vote_count", "votes")
)
@ddt.unpack
def test_order_by_query(self, http_query, cc_query):
"""
Tests the order_by parameter
Arguments:
http_query (str): Query string sent in the http request
cc_query (str): Query string used for the comments client service
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_by=http_query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": [cc_query],
"page": ["1"],
"per_page": ["11"],
})
def test_order_direction(self):
"""
Only "desc" is supported for order. Also, since it is simply swallowed,
it isn't included in the params.
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_direction="desc",
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
})
def test_invalid_order_direction(self):
"""
Test with invalid order_direction (e.g. "asc")
"""
with self.assertRaises(ValidationError) as assertion:
self.register_get_threads_response([], page=1, num_pages=0)
get_thread_list( # pylint: disable=expression-not-assigned
self.request,
self.course.id,
page=1,
page_size=11,
order_direction="asc",
).data
self.assertIn("order_direction", assertion.exception.message_dict)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCommentListTest(ForumsEnableMixin, CommentsServiceMockMixin, SharedModuleStoreTestCase):
"""Test for get_comment_list"""
@classmethod
def setUpClass(cls):
super(GetCommentListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", unicode(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(ThreadNotFoundError):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
with self.assertRaises(DiscussionDisabledError):
self.get_comment_list(
self.make_minimal_cs_thread(
overrides={"course_id": unicode(disabled_course.id)}
)
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": unicode(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
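        # A plain student should not be able to read a thread belonging to a different
        # cohort when both the course and the topic are cohorted; all other cases succeed.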
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment({"username": self.user.username})],
"resp_total": 71
}),
page=6,
page_size=14
)
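        # page=6 with page_size=14 should skip the first (6 - 1) * 14 = 70 responses.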
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"user_id": [str(self.user.id)],
"mark_as_read": ["False"],
"recursive": ["False"],
"resp_skip": ["70"],
"resp_limit": ["14"],
"with_responses": ["True"],
}
)
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"child_count": 0,
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"child_count": 0,
"children": [],
}
]
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
).data["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment", "username": self.user.username})],
"non_endorsed_responses": [make_minimal_cs_comment({
"id": "non_endorsed_comment", "username": self.user.username
})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True).data
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False).data
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"username": self.user.username,
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"},
})
]
})
actual_comments = self.get_comment_list(thread).data["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
        response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment({"username": self.user.username})],
response_total_field: 5,
})
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5).data
self.assertIsNone(actual["pagination"]["next"])
self.assertIsNone(actual["pagination"]["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["pagination"]["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2).data
self.assertIsNone(actual["pagination"]["next"])
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({
"id": "comment_{}".format(i),
"username": self.user.username
}) for i in range(10)]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size).data
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["pagination"]["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["pagination"]["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
@ddt.ddt
@disable_signal(api, 'thread_created')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_thread"""
LONG_TITLE = (
'Lorem ipsum dolor sit amet, consectetuer adipiscing elit. '
'Aenean commodo ligula eget dolor. Aenean massa. Cum sociis '
'natoque penatibus et magnis dis parturient montes, nascetur '
'ridiculus mus. Donec quam felis, ultricies nec, '
'pellentesque eu, pretium quis, sem. Nulla consequat massa '
'quis enim. Donec pede justo, fringilla vel, aliquet nec, '
'vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet '
'a, venenatis vitae, justo. Nullam dictum felis eu pede '
'mollis pretium. Integer tincidunt. Cras dapibus. Vivamus '
'elementum semper nisi. Aenean vulputate eleifend tellus. '
'Aenean leo ligula, porttitor eu, consequat vitae, eleifend '
'ac, enim. Aliquam lorem ante, dapibus in, viverra quis, '
'feugiat a, tellus. Phasellus viverra nulla ut metus varius '
'laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies '
'nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam '
'eget dui. Etiam rhoncus. Maecenas tempus, tellus eget '
'condimentum rhoncus, sem quam semper libero, sit amet '
'adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, '
'luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et '
'ante tincidunt tempus. Donec vitae sapien ut libero '
'venenatis faucibus. Nullam quis ante. Etiam sit amet orci '
'eget eros faucibus tincidunt. Duis leo. Sed fringilla '
'mauris sit amet nibh. Donec sodales sagittis magna. Sed '
'consequat, leo eget bibendum sodales, augue velit cursus '
'nunc, quis gravida magna mi a libero. Fusce vulputate '
'eleifend sapien. Vestibulum purus quam, scelerisque ut, '
'mollis sed, nonummy id, metus. Nullam accumsan lorem in '
'dui. Cras ultricies mi eu turpis hendrerit fringilla. '
'Vestibulum ante ipsum primis in faucibus orci luctus et '
'ultrices posuere cubilia Curae; In ac dui quis mi '
'consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu '
'tortor, suscipit eget, imperdiet nec, imperdiet iaculis, '
'ipsum. Sed aliquam ultrices mauris. Integer ante arcu, '
'accumsan a, consectetuer eget, posuere ut, mauris. Praesent '
'adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc '
'nonummy metus.'
)
@classmethod
def setUpClass(cls):
super(CreateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"read": True,
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_thread(self.request, self.minimal_data)
expected = self.expected_thread_data({
"id": "test_id",
"course_id": unicode(self.course.id),
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"read": True,
})
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"title_truncated": False,
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@mock.patch("eventtracking.tracker.emit")
def test_title_truncation(self, mock_emit):
data = self.minimal_data.copy()
data['title'] = self.LONG_TITLE
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"read": True,
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
create_thread(self.request, data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": self.LONG_TITLE[:1000],
"title_truncated": True,
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group_set", "group_is_none", "group_is_set"],
)
)
@ddt.unpack
def test_group_id(self, role_name, course_is_cohorted, topic_is_cohorted, data_group_state):
"""
Tests whether the user has permission to create a thread with certain
group_id values.
        Explicitly passing a group_id (whether None or a value) is expected to fail with a
        ValidationError unless the course is cohorted and the user holds a privileged role.
        When no group_id is passed and the topic is cohorted, the user's own cohort id is applied.
"""
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
if course_is_cohorted:
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_post_thread_response({"username": self.user.username})
data = self.minimal_data.copy()
data["course_id"] = unicode(cohort_course.id)
if data_group_state == "group_is_none":
data["group_id"] = None
elif data_group_state == "group_is_set":
if course_is_cohorted:
data["group_id"] = cohort.id + 1
else:
data["group_id"] = 1 # Set to any value since there is no cohort
expected_error = (
data_group_state in ["group_is_none", "group_is_set"] and
(not course_is_cohorted or role_name == FORUM_ROLE_STUDENT)
)
try:
create_thread(self.request, data)
self.assertFalse(expected_error)
actual_post_data = httpretty.last_request().parsed_body
if data_group_state == "group_is_set":
self.assertEqual(actual_post_data["group_id"], [str(data["group_id"])])
elif data_group_state == "no_group_set" and course_is_cohorted and topic_is_cohorted:
self.assertEqual(actual_post_data["group_id"], [str(cohort.id)])
else:
self.assertNotIn("group_id", actual_post_data)
except ValidationError as ex:
if not expected_error:
self.fail(u"Unexpected validation error: {}".format(ex))
def test_following(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
def test_voted(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'thread_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_thread_flag_response("test_id")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, {"course_id": "non/existent/course"})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.minimal_data["course_id"] = unicode(disabled_course.id)
with self.assertRaises(DiscussionDisabledError):
create_thread(self.request, self.minimal_data)
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@ddt.ddt
@disable_signal(api, 'comment_created')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_comment"""
@classmethod
def setUpClass(cls):
super(CreateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
with self.assert_signal_sent(api, 'comment_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
)
)
@ddt.unpack
def test_endorsed(self, role_name, is_thread_author, thread_type):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"thread_type": thread_type,
"user_id": str(self.user.id) if is_thread_author else str(self.user.id + 1),
})
)
self.register_post_comment_response({"username": self.user.username}, "test_thread")
data = self.minimal_data.copy()
data["endorsed"] = True
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(not is_thread_author or thread_type == "discussion")
)
try:
create_comment(self.request, data)
self.assertEqual(httpretty.last_request().parsed_body["endorsed"], ["True"])
self.assertFalse(expected_error)
except ValidationError:
self.assertTrue(expected_error)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment", "username": self.user.username}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'comment_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_comment_response({"id": "test_comment", "username": self.user.username}, "test_thread")
self.register_comment_flag_response("test_comment")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
create_comment(self.request, self.minimal_data)
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(disabled_course.id),
"commentable_id": "test_topic",
})
)
with self.assertRaises(DiscussionDisabledError):
create_comment(self.request, self.minimal_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({"username": self.user.username}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@ddt.ddt
@disable_signal(api, 'thread_edited')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_thread"""
@classmethod
def setUpClass(cls):
super(UpdateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
self.assertEqual(actual, self.expected_thread_data({
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"topic_id": "original_topic",
"read": True,
"title": "Original Title",
}))
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
"read": ["False"],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the thread should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_vote_status}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'original_topic',
'id': 'test_thread'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests vote_count increases and decreases correctly from the same user
"""
#setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
starting_vote_count = 1
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": starting_vote_count}})
#first vote
data = {"voted": first_vote}
result = update_thread(self.request, "test_thread", data)
self.register_thread(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
#second vote
data = {"voted": second_vote}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests vote_count increases and decreases correctly from different users
"""
#setup
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_thread"])
vote_count += 1
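        # each user now votes in turn; the expected count depends on whether that user's vote actually changed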
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_thread(request, "test_thread", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the thread should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
        unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_thread_flag_response("test_thread")
self.register_thread({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/threads/test_thread/abuse_flag"
unflag_url = "/api/v1/threads/test_thread/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field may not be blank."]}
)
@ddt.ddt
@disable_signal(api, 'comment_edited')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_comment"""
@classmethod
def setUpClass(cls):
super(UpdateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None, course=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
if course is None:
course = self.course
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
with self.assert_signal_sent(api, 'comment_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(CommentNotFoundError):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
self.register_comment(course=_discussion_disabled_course_for(self.user))
with self.assertRaises(DiscussionDisabledError):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the comment should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": new_vote_status}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if new_vote_status else 0)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.response.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'dummy',
'id': 'test_comment'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests vote_count increases and decreases correctly from the same user
"""
#setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
starting_vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": starting_vote_count}})
#first vote
data = {"voted": first_vote}
result = update_comment(self.request, "test_comment", data)
self.register_comment(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
#second vote
data = {"voted": second_vote}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests vote_count increases and decreases correctly from different users
"""
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_comment"])
vote_count += 1
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_comment(request, "test_comment", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the comment should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
        unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_comment_flag_response("test_comment")
self.register_comment({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/comments/test_comment/abuse_flag"
unflag_url = "/api/v1/comments/test_comment/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
@ddt.ddt
@disable_signal(api, 'thread_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_thread"""
@classmethod
def setUpClass(cls):
super(DeleteThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a thread
        All privileged roles are able to delete a thread. A student role can
        only delete a thread if the student is the author and the thread is
        either not in a cohort or in the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.ddt
@disable_signal(api, 'comment_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_comment"""
@classmethod
def setUpClass(cls):
super(DeleteCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
with self.assert_signal_sent(api, 'comment_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(CommentNotFoundError):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_comment_and_thread(
thread_overrides={"course_id": unicode(disabled_course.id)},
overrides={"course_id": unicode(disabled_course.id)}
)
with self.assertRaises(DiscussionDisabledError):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a comment
        All privileged roles are able to delete a comment. A student role can
        only delete a comment if the student is the author and the comment is
        either not in a cohort or in the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class RetrieveThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase
):
"""Tests for get_thread"""
@classmethod
def setUpClass(cls):
super(RetrieveThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(RetrieveThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for GET on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"title": "Test Title",
"body": "Test body",
"resp_total": 0,
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
def test_basic(self):
self.register_thread({"resp_total": 2})
self.assertEqual(get_thread(self.request, self.thread_id), self.expected_thread_data({
"response_count": 2,
"unread_comment_count": 1,
}))
self.assertEqual(httpretty.last_request().method, "GET")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
get_thread(self.request, "missing_thread")
def test_nonauthor_enrolled_in_course(self):
non_author_user = UserFactory.create()
self.register_get_user_response(non_author_user)
CourseEnrollmentFactory.create(user=non_author_user, course_id=self.course.id)
self.register_thread()
self.request.user = non_author_user
self.assertEqual(get_thread(self.request, self.thread_id), self.expected_thread_data({
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"unread_comment_count": 1,
}))
self.assertEqual(httpretty.last_request().method, "GET")
def test_not_enrolled_in_course(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
get_thread(self.request, self.thread_id)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for retrieving a thread
        All privileged roles are able to retrieve a thread. A student role can
        only retrieve a thread if the student is the author and the thread is
        either not in a cohort or in the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
get_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
| agpl-3.0 | 4,310,273,995,939,487,000 | 39.28949 | 120 | 0.568971 | false |
ehabkost/virt-test | qemu/tests/usb_hotplug.py | 1 | 1705 | import logging, re, uuid
from autotest.client.shared import error
from autotest.client import utils
@error.context_aware
def run_usb_hotplug(test, params, env):
"""
Test usb hotplug
@param test: kvm test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
device = params.get("usb_type_testdev")
product = params.get("product")
# compose strings
monitor_add = "device_add %s" % device
monitor_add += ",bus=usbtest.0,id=usbplugdev"
monitor_del = "device_del usbplugdev"
error.context("Log into guest", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login()
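    # flush the kernel ring buffer so the later "dmesg -c" reads contain only the hotplug events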
session.cmd_status("dmesg -c")
error.context("Plugin usb device", logging.info)
reply = vm.monitor.cmd(monitor_add)
if reply.find("Parameter 'driver' expects a driver name") != -1:
raise error.TestNAError("usb device %s not available" % device)
session.cmd_status("sleep 1")
session.cmd_status("udevadm settle")
messages_add = session.cmd("dmesg -c")
for line in messages_add.splitlines():
logging.debug("[dmesg add] %s" % line)
if messages_add.find("Product: %s" % product) == -1:
raise error.TestFail("kernel didn't detect plugin")
error.context("Unplug usb device", logging.info)
vm.monitor.cmd(monitor_del)
session.cmd_status("sleep 1")
messages_del = session.cmd("dmesg -c")
for line in messages_del.splitlines():
logging.debug("[dmesg del] %s" % line)
if messages_del.find("USB disconnect") == -1:
raise error.TestFail("kernel didn't detect unplug")
session.close()
| gpl-2.0 | 3,051,980,685,977,966,000 | 32.431373 | 71 | 0.659824 | false |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_chart_column07.py | 1 | 2108 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_column07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
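        # pin the axis ids so the generated chart XML matches the Excel-produced reference file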
chart.axis_ids = [68810240, 68811776]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=(Sheet1!$A$1:$A$2,Sheet1!$A$4:$A$5)',
'values_data': [1, 2, 4, 5],
})
worksheet.insert_chart('E9', chart)
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 3,955,221,327,216,043,000 | 25.024691 | 79 | 0.486243 | false |
shaneoc/atom | atom/router/directory.py | 1 | 1333 |
class Directory(object):
def __init__(self, router):
self.router = router
def start(self):
db = self.router.database
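        # create the directory schema on startup: user accounts, installed modules, and hostname-to-module mappings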
db.execute(
'CREATE TABLE IF NOT EXISTS users (' +
'id INTEGER PRIMARY KEY, name TEXT, password TEXT)')
db.execute(
'INSERT OR REPLACE INTO users VALUES (0, ?, ?)', ('system', None))
db.execute(
'CREATE TABLE IF NOT EXISTS modules (' +
'id INTEGER PRIMARY KEY, name TEXT)')
db.execute(
'CREATE TABLE IF NOT EXISTS hostnames (' +
'id INTEGER PRIMARY KEY, hostname TEXT UNIQUE, module_id INTEGER)')
def get_users(self):
pass
def get_modules(self):
pass
def get_system_hostname(self):
return 'sys.xvc.cc:8080'
def get_shell_hostname(self, uid):
return 'home.xvc.cc:8080'
def check_login(self, username, password):
if username == 'shane' and password == 'test':
return 1
else:
return None
def check_authorization(self, uid, hostname):
return True
def get_socket(self, hostname, uri):
return False
class Module(object):
def get_endpoint(self, path):
pass
class User(object):
pass | mit | 6,492,416,664,778,558,000 | 24.653846 | 79 | 0.543136 | false |
bokeh/bokeh | examples/app/fourier_animated.py | 1 | 6647 | ''' Show a streaming, updating representation of Fourier Series.
The example was inspired by `this video`_.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve fourier_animated.py
at your command prompt. Then navigate to the URL
http://localhost:5006/fourier_animated
in your browser.
.. _this video: https://www.youtube.com/watch?v=LznjC4Lo7lE
'''
from collections import OrderedDict
import numpy as np
from bokeh.driving import repeat
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
pi = np.pi
N = 100
newx = x = np.linspace(0, 2*pi, N)
shift = 2.2
base_x = x + shift
period = pi/2
palette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']
def new_source():
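    # one bundle of data sources per harmonic: the traced curve, the connector/radius lines, the moving point, and the circle outline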
return dict(
curve=ColumnDataSource(dict(x=[], base_x=[], y=[])),
lines=ColumnDataSource(dict(line_x=[], line_y=[], radius_x=[], radius_y=[])),
circle_point=ColumnDataSource(dict(x=[], y=[], r=[])),
circleds=ColumnDataSource(dict(x=[], y=[]))
)
def create_circle_glyphs(p, color, sources):
p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])
p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])
p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])
def create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):
if y_range is None:
y_range=[-2, 2]
# create new figure
p = figure(title=title, width=800, height=300, x_range=[-2, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
cx, cy = 0, 0
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i], cx, cy, i==0)
cp = sources['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i==0:
# compute the full fourier eq
full_y = sum(foo(x) for foo in foos)
# replace the foo curve with the full fourier eq
sources['curve'] = ColumnDataSource(dict(x=x, base_x=base_x, y=full_y))
# draw the line
p.line('base_x','y', color="orange", line_width=2, source=sources['curve'])
if i==len(foos)-1:
# if it's the last foo let's draw a circle on the head of the curve
sources['floating_point'] = ColumnDataSource({'x':[shift], 'y': [cy]})
p.line('line_x', 'line_y', color=palette[i], line_width=2, source=sources['lines'])
p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])
# draw the circle, radius and circle point related to foo domain
create_circle_glyphs(p, palette[i], sources)
_sources.append(sources)
return p, _sources
def get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):
if compute_curve:
ys = foo(xs)
sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)
r = foo(period)
y = foo(xs[0]) + cy
x = cfoo(xs[0]) + cx
sources['lines'].data = {
'line_x': [x, shift], 'line_y': [y, y],
'radius_x': [0, x], 'radius_y': [0, y]
}
sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}
sources['circleds'].data=dict(
x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,
y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,
)
def update_sources(sources, foos, newx, ind, cfoos):
cx, cy = 0, 0
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,
compute_curve = i != 0)
if i == 0:
full_y = sum(foo(newx) for foo in foos)
sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)
cp = sources[i]['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i == len(foos)-1:
sources[i]['floating_point'].data['x'] = [shift]
sources[i]['floating_point'].data['y'] = [cy]
def update_centric_sources(sources, foos, newx, ind, cfoos):
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i])
def create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):
p = figure(title=title, width=800, height=300, x_range=[-2, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i])
_sources.append(sources)
if i:
legend_label = "4sin(%(c)sx)/%(c)spi" % {'c': i*2+1}
else:
legend_label = "4sin(x)/pi"
p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])
p.line('line_x', 'line_y', color=palette[i], line_width=2,
source=sources['lines'], legend_label=legend_label)
create_circle_glyphs(p, palette[i], sources)
p.legend.location = "top_right"
p.legend.orientation = "horizontal"
p.legend.padding = 6
p.legend.margin = 6
p.legend.spacing = 6
return p, _sources
# create the series partials
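# f1..f4 (and cf1..cf4) are the first four odd harmonics of the square-wave Fourier series: 4*sin((2k+1)x)/((2k+1)*pi)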
f1 = lambda x: (4*np.sin(x))/pi
f2 = lambda x: (4*np.sin(3*x))/(3*pi)
f3 = lambda x: (4*np.sin(5*x))/(5*pi)
f4 = lambda x: (4*np.sin(7*x))/(7*pi)
cf1 = lambda x: (4*np.cos(x))/pi
cf2 = lambda x: (4*np.cos(3*x))/(3*pi)
cf3 = lambda x: (4*np.cos(5*x))/(5*pi)
cf4 = lambda x: (4*np.cos(7*x))/(7*pi)
fourier = OrderedDict(
fourier_4 = {
'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),
'fs': [f1, f2, f3, f4],
'cfs': [cf1, cf2, cf3, cf4]
},
)
for k, p in fourier.items():
p['plot'], p['sources'] = create_plot(
p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']
)
for k, p in fourier.items():
p['cplot'], p['csources'] = create_centric_plot(
p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']
)
layout = column(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])
@repeat(range(N))
def cb(gind):
global newx
oldx = np.delete(newx, 0)
newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])
for k, p in fourier.items():
update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])
update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])
curdoc().add_periodic_callback(cb, 100)
curdoc().add_root(layout)
curdoc().title = "Fourier Animated"
| bsd-3-clause | 8,983,696,342,426,303,000 | 32.235 | 114 | 0.574846 | false |
mposner/pychess | board.py | 1 | 5063 | # pychess
# mposner 11/23/14
from piece import Piece
from piece_attributes import PieceType, Color
from utils import isValidPosition
class Board:
"""Represents a chess board"""
def __init__(self):
"""Initialize a new chess board"""
self.makeNewPieces()
def __str__(self):
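        # render the board as ASCII art: rank 8 at the top, rank 1 at the bottom, files A-H across the bottom edge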
result = " " + 33*"=" + "\n"
for i in range(7,-1,-1):
result += " " + str(i+1) + " | "
for j in range(8):
if self.board[i][j] is None:
result += " | "
else:
result += self.board[i][j].shortstr() + " | "
result = result[:-1] + "\n"
if i > 0:
result += " " + 33*"-" + "\n"
else:
result += " " + 33*"=" + "\n"
result += " " + " ".join(["A","B","C","D","E","F","G","H"])
return result
def getPiece(self, position):
"""Return the piece at the given board square"""
if not isValidPosition(position):
return None
rank = int(position[1]) - 1
file = ord(position[0]) - ord("A")
return self.board[rank][file]
def isValidMove(self, piece, end):
"""See if a move is valid for a given Piece"""
if not isValidPosition(end): #rule out bad position input
return False
startfile = ord(piece.position[0]) #file is column, A-H
startrank = int(piece.position[1]) #rank is row, 1-8
endfile = ord(end[0])
endrank = int(end[1])
filediff = abs(startfile - endfile)
rankdiff = abs(startrank - endrank)
if piece.type == PieceType.KING:
if filediff <= 1 and rankdiff <= 1:
return True
else:
return False
elif piece.type == PieceType.QUEEN:
if filediff == 0 or rankdiff == 0:
return True
elif filediff == rankdiff:
return True
else:
return False
elif piece.type == PieceType.BISHOP:
if filediff == rankdiff:
return True
else:
return False
elif piece.type == PieceType.KNIGHT:
if filediff == 0 and rankdiff == 0:
return True
elif filediff == 1 and rankdiff == 2:
return True
elif filediff == 2 and rankdiff == 1:
return True
else:
return False
elif piece.type == PieceType.ROOK:
if filediff == 0 or rankdiff == 0:
return True
else:
return False
elif piece.type == PieceType.PAWN:
if filediff == 0 and (endrank-startrank) == 1:
# Normal move forward
return True
elif filediff == 1 and rankdiff == 1:
# Only valid if taking an enemy piece
if self.getPiece(end) is not None and \
self.getPiece(end).color != piece.color:
return True
elif filediff == 0 and (endrank-startrank) == 2:
# Only valid if pawn is starting from starting position
if int(piece.position[1]) == 2:
return True
return False
def makeNewPieces(self):
"""Make a new set of pieces"""
white = []
white.append(Piece(Color.WHITE, PieceType.ROOK, "A1"))
white.append(Piece(Color.WHITE, PieceType.KNIGHT, "B1"))
white.append(Piece(Color.WHITE, PieceType.BISHOP, "C1"))
white.append(Piece(Color.WHITE, PieceType.QUEEN, "D1"))
white.append(Piece(Color.WHITE, PieceType.KING, "E1"))
white.append(Piece(Color.WHITE, PieceType.BISHOP, "F1"))
white.append(Piece(Color.WHITE, PieceType.KNIGHT, "G1"))
white.append(Piece(Color.WHITE, PieceType.ROOK, "H1"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "A2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "B2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "C2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "D2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "E2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "F2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "G2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "H2"))
black = []
black.append(Piece(Color.BLACK, PieceType.ROOK, "A8"))
black.append(Piece(Color.BLACK, PieceType.KNIGHT, "B8"))
black.append(Piece(Color.BLACK, PieceType.BISHOP, "C8"))
black.append(Piece(Color.BLACK, PieceType.QUEEN, "D8"))
black.append(Piece(Color.BLACK, PieceType.KING, "E8"))
black.append(Piece(Color.BLACK, PieceType.BISHOP, "F8"))
black.append(Piece(Color.BLACK, PieceType.KNIGHT, "G8"))
black.append(Piece(Color.BLACK, PieceType.ROOK, "H8"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "A7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "B7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "C7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "D7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "E7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "F7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "G7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "H7"))
self.white = white
self.black = black
#2-D array representing the board (board[0] = rank 1)
board = [[] for i in range(8)]
board[0] = white[0:8]
board[1] = white[8:]
board[2] = [None for i in range(8)]
board[3] = [None for i in range(8)]
board[4] = [None for i in range(8)]
board[5] = [None for i in range(8)]
board[6] = black[8:]
board[7] = black[0:8]
self.board = board
| gpl-2.0 | -1,243,340,808,460,693,000 | 26.818681 | 69 | 0.633419 | false |