code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# face_recognition documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from unittest.mock import MagicMock
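# Mock heavy or optional dependencies so that `import face_recognition` below succeeds
# during the documentation build even when these packages are not installed.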
class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
MOCK_MODULES = ['face_recognition_models', 'Click', 'dlib', 'numpy', 'PIL']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this conf.py file.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import face_recognition
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Face Recognition'
copyright = u"2017, Adam Geitgey"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = face_recognition.__version__
# The full version, including alpha/beta/rc tags.
release = face_recognition.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'face_recognitiondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'face_recognition.tex',
u'Face Recognition Documentation',
u'Adam Geitgey', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'face_recognition',
u'Face Recognition Documentation',
[u'Adam Geitgey'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'face_recognition',
u'Face Recognition Documentation',
u'Adam Geitgey',
'face_recognition',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| ageitgey/face_recognition | docs/conf.py | Python | mit | 8,789 |
Given a set of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sum to T.
The same number may be chosen from C an unlimited number of times.
Note:
All numbers (including the target) will be positive integers.
Elements in a combination (a1, a2, … , ak) must be in non-descending order (i.e., a1 ≤ a2 ≤ … ≤ ak).
The solution set must not contain duplicate combinations.
For example, given candidate set 2,3,6,7 and target 7,
a solution set is:
[7]
[2, 2, 3]
class Solution:
    # @param candidates, a list of integers
    # @param target, integer
    # @return a list of lists of integers
    # Recursion
    # Sort the array at first, then use the recursion to find
    # the result, Time O(n^2)
    # 96ms
    def combinationSum(self, candidates, target):
        candidates.sort()
        self.result = []
        self.dfs(candidates, target, 0, [])
        return self.result

    def dfs(self, candidates, target, start, reslist):
        length = len(candidates)
        if target == 0:
            return self.result.append(reslist)
        for i in xrange(start, length):
            if target < candidates[i]: return
            self.dfs(candidates, target - candidates[i], i, reslist + [candidates[i]])

    # DFS, not sort array (220ms)
    def combinationSum(self, candidates, target):
        self.result = []
        self.dfs(candidates, 0, target, [])
        return self.result

    def dfs(self, can, cursum, target, res):
        if cursum > target: return
        if cursum == target:
            self.result.append(res)
            return
        for i in xrange(len(can)):
            if not res or res[len(res)-1] <= can[i]:
                self.dfs(can, cursum + can[i], target, res + [can[i]])
For combination_sum2, where each candidate may be used at most once, just change the recursion start index from i to i + 1 (a sketch follows below).
Time Complexity: T(n) = T(n-1) + 1 = O(n) ?
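A minimal sketch of the combination_sum2 variant described above, assuming each candidate may be used at most once and the input may contain duplicates; the Solution2 class name and this implementation are illustrative, not part of the original file.

class Solution2:
    # @param candidates, a list of integers (may contain duplicates)
    # @param target, integer
    # @return a list of lists of integers
    def combinationSum2(self, candidates, target):
        candidates.sort()
        self.result = []
        self.dfs(candidates, target, 0, [])
        return self.result

    def dfs(self, candidates, target, start, reslist):
        if target == 0:
            self.result.append(reslist)
            return
        for i in xrange(start, len(candidates)):
            if target < candidates[i]:
                return
            # skip duplicate values at the same recursion depth so the solution set stays unique
            if i > start and candidates[i] == candidates[i-1]:
                continue
            # start the next level at i + 1 so each element is used at most once
            self.dfs(candidates, target - candidates[i], i + 1, reslist + [candidates[i]])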
| UmassJin/Leetcode | Array/combination_sum1.py | Python | mit | 1,940 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
        ('artist', '0002_auto_20150322_1630'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('location', models.CharField(max_length=500, verbose_name='Location')),
                ('date_time', models.DateTimeField(verbose_name='Date & Time')),
                ('artist', models.ForeignKey(to='artist.Artist')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| fotcorn/liveinconcert | event/migrations/0001_initial.py | Python | mit | 777 |
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import cbh_core_model
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cbh_core_model'
copyright = u'2015, Andrew Stretton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cbh_core_model.__version__
# The full version, including alpha/beta/rc tags.
release = cbh_core_model.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cbh_core_modeldoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cbh_core_model.tex', u'cbh_core_model Documentation',
u'Andrew Stretton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cbh_core_model', u'cbh_core_model Documentation',
[u'Andrew Stretton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cbh_core_model', u'cbh_core_model Documentation',
u'Andrew Stretton', 'cbh_core_model', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| thesgc/cbh_core_model | docs/conf.py | Python | mit | 8,194 |
import numpy as np
import keras as ks
import matplotlib.pyplot as plt
from keras.datasets import boston_housing
from keras import models
from keras import layers
from keras.utils.np_utils import to_categorical
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
mean = train_data.mean(axis = 0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model
k = 4
num_val_samples = len(train_data) // k
num_epochs = 500
all_scores = []
all_mae_histories = []
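# K-fold cross-validation: each pass holds out one fold for validation and trains on the
# remaining folds, so every sample is used for validation exactly once.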
for i in range(k):
    print('processing fold #', i)
    val_data = train_data[i * num_val_samples : (i+1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples : (i+1) * num_val_samples]
    partial_train_data = np.concatenate([train_data[: i * num_val_samples], train_data[(i+1) * num_val_samples:]], axis=0)
    partial_train_targets = np.concatenate([train_targets[: i * num_val_samples], train_targets[(i+1) * num_val_samples:]], axis=0)
    model = build_model()
    history = model.fit(partial_train_data, partial_train_targets, epochs=num_epochs, batch_size=1, verbose=0)
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_mae_histories.append(history.history['mean_absolute_error'])
    all_scores.append(val_mae)
print(all_scores)
average_mae_history = [np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
# plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.plot(average_mae_history[10:])
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show() | FiveEye/ml-notebook | dlp/ch3_3_boston_housing.py | Python | mit | 1,900 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
ckanutils
~~~~~~~~~
Provides methods for interacting with a CKAN instance
Examples:
literal blocks::
python example_google.py
Attributes:
CKAN_KEYS (List[str]): available CKAN keyword arguments.
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import requests
import ckanapi
import itertools as it
from os import environ, path as p
from datetime import datetime as dt
from operator import itemgetter
from pprint import pprint
from ckanapi import NotFound, NotAuthorized, ValidationError
from tabutils import process as pr, io, fntools as ft, convert as cv
__version__ = '0.14.9'
__title__ = 'ckanutils'
__author__ = 'Reuben Cummings'
__description__ = 'Miscellaneous CKAN utility library'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CKAN_KEYS = ['hash_table', 'remote', 'api_key', 'ua', 'force', 'quiet']
API_KEY_ENV = 'CKAN_API_KEY'
REMOTE_ENV = 'CKAN_REMOTE_URL'
UA_ENV = 'CKAN_USER_AGENT'
DEF_USER_AGENT = 'ckanutils/%s' % __version__
DEF_HASH_PACK = 'hash-table'
DEF_HASH_RES = 'hash-table.csv'
CHUNKSIZE_ROWS = 10 ** 3
CHUNKSIZE_BYTES = 2 ** 20
ENCODING = 'utf-8'
class CKAN(object):
"""Interacts with a CKAN instance.
Attributes:
force (bool): Force.
verbose (bool): Print debug statements.
quiet (bool): Suppress debug statements.
address (str): CKAN url.
hash_table (str): The hash table package id.
keys (List[str]):
"""
def __init__(self, **kwargs):
"""Initialization method.
Args:
**kwargs: Keyword arguments.
Kwargs:
hash_table (str): The hash table package id.
remote (str): The remote ckan url.
api_key (str): The ckan api key.
ua (str): The user agent.
force (bool): Force (default: True).
quiet (bool): Suppress debug statements (default: False).
Returns:
New instance of :class:`CKAN`
Examples:
>>> CKAN() #doctest: +ELLIPSIS
<ckanutils.CKAN object at 0x...>
"""
default_ua = environ.get(UA_ENV, DEF_USER_AGENT)
def_remote = environ.get(REMOTE_ENV)
def_api_key = environ.get(API_KEY_ENV)
remote = kwargs.get('remote', def_remote)
self.api_key = kwargs.get('api_key', def_api_key)
self.force = kwargs.get('force', True)
self.quiet = kwargs.get('quiet')
self.user_agent = kwargs.get('ua', default_ua)
self.verbose = not self.quiet
self.hash_table = kwargs.get('hash_table', DEF_HASH_PACK)
ckan_kwargs = {'apikey': self.api_key, 'user_agent': self.user_agent}
attr = 'RemoteCKAN' if remote else 'LocalCKAN'
ckan = getattr(ckanapi, attr)(remote, **ckan_kwargs)
self.address = ckan.address
self.package_show = ckan.action.package_show
try:
self.hash_table_pack = self.package_show(id=self.hash_table)
except NotFound:
self.hash_table_pack = None
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
self.hash_table_pack = None
else:
raise err
try:
self.hash_table_id = self.hash_table_pack['resources'][0]['id']
except (IndexError, TypeError):
self.hash_table_id = None
# shortcuts
self.datastore_search = ckan.action.datastore_search
self.datastore_create = ckan.action.datastore_create
self.datastore_delete = ckan.action.datastore_delete
self.datastore_upsert = ckan.action.datastore_upsert
self.datastore_search = ckan.action.datastore_search
self.resource_show = ckan.action.resource_show
self.resource_create = ckan.action.resource_create
self.package_create = ckan.action.package_create
self.package_update = ckan.action.package_update
self.package_privatize = ckan.action.bulk_update_private
self.revision_show = ckan.action.revision_show
self.organization_list = ckan.action.organization_list_for_user
self.organization_show = ckan.action.organization_show
self.license_list = ckan.action.license_list
self.group_list = ckan.action.group_list
self.user = ckan.action.get_site_user()
def create_table(self, resource_id, fields, **kwargs):
"""Creates a datastore table for an existing filestore resource.
Args:
resource_id (str): The filestore resource id.
fields (List[dict]): fields/columns and their extra metadata.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
force (bool): Create resource even if read-only.
aliases (List[str]): name(s) for read only alias(es) of the
resource.
primary_key (List[str]): field(s) that represent a unique key.
indexes (List[str]): index(es) on table.
Returns:
dict: The newly created data object.
Raises:
ValidationError: If unable to validate user on ckan site.
NotFound: If unable to find resource.
Examples:
>>> CKAN(quiet=True).create_table('rid', fields=[{'id': 'field', \
'type': 'text'}])
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
kwargs.setdefault('force', self.force)
kwargs['resource_id'] = resource_id
kwargs['fields'] = fields
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
if self.verbose:
print('Creating table `%s` in datastore...' % resource_id)
try:
return self.datastore_create(**kwargs)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise
def delete_table(self, resource_id, **kwargs):
"""Deletes a datastore table.
Args:
resource_id (str): The datastore resource id.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
force (bool): Delete resource even if read-only.
filters (dict): Filters to apply before deleting, e.g.,
{"name": "fred"}. If missing delete whole table and all
dependent views.
Returns:
dict: Original filters sent if table was found, `None` otherwise.
Raises:
ValidationError: If unable to validate user on ckan site.
Examples:
>>> CKAN(quiet=True).delete_table('rid')
Can't delete. Table `rid` was not found in datastore.
"""
kwargs.setdefault('force', self.force)
kwargs['resource_id'] = resource_id
init_msg = "Can't delete. Table `%s`" % resource_id
err_msg = '%s was not found in datastore.' % init_msg
read_msg = '%s is read only.' % init_msg
if self.verbose:
print('Deleting table `%s` from datastore...' % resource_id)
try:
result = self.datastore_delete(**kwargs)
except NotFound:
print(err_msg)
result = None
except ValidationError as err:
if 'read-only' in err.error_dict:
print(read_msg)
print("Set 'force' to True and try again.")
result = None
elif err.error_dict.get('resource_id') == ['Not found: Resource']:
print(err_msg)
result = None
else:
raise err
return result
def insert_records(self, resource_id, records, **kwargs):
"""Inserts records into a datastore table.
Args:
resource_id (str): The datastore resource id.
records (List[dict]): The records to insert.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
method (str): Insert method. One of ['update, 'insert', 'upsert']
(default: 'insert').
force (bool): Create resource even if read-only.
start (int): Row number to start from (zero indexed).
stop (int): Row number to stop at (zero indexed).
chunksize (int): Number of rows to write at a time.
Returns:
int: Number of records inserted.
Raises:
NotFound: If unable to find the resource.
Examples:
>>> CKAN(quiet=True).insert_records('rid', [{'field': 'value'}])
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
recoded = pr.json_recode(records)
chunksize = kwargs.pop('chunksize', 0)
start = kwargs.pop('start', 0)
stop = kwargs.pop('stop', None)
kwargs.setdefault('force', self.force)
kwargs.setdefault('method', 'insert')
kwargs['resource_id'] = resource_id
count = 1
for chunk in ft.chunk(recoded, chunksize, start=start, stop=stop):
length = len(chunk)
if self.verbose:
print(
'Adding records %i - %i to resource %s...' % (
count, count + length - 1, resource_id))
kwargs['records'] = chunk
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
self.datastore_upsert(**kwargs)
except requests.exceptions.ConnectionError as err:
if 'Broken pipe' in err.message[1]:
print('Chunksize too large. Try using a smaller chunksize.')
return 0
else:
raise err
except NotFound:
# Keep exception message consistent with the others
raise NotFound(err_msg)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
count += length
return count
def get_hash(self, resource_id):
"""Gets the hash of a datastore table.
Args:
resource_id (str): The datastore resource id.
Returns:
str: The datastore resource hash.
Raises:
NotFound: If `hash_table_id` isn't set or not in datastore.
NotAuthorized: If unable to authorize ckan user.
Examples:
>>> CKAN(hash_table='hash_jhb34rtj34t').get_hash('rid')
Traceback (most recent call last):
NotFound: {u'item': u'package', u'message': u'Package \
`hash_jhb34rtj34t` was not found!'}
"""
if not self.hash_table_pack:
message = 'Package `%s` was not found!' % self.hash_table
raise NotFound({'message': message, 'item': 'package'})
if not self.hash_table_id:
message = 'No resources found in package `%s`!' % self.hash_table
raise NotFound({'message': message, 'item': 'resource'})
kwargs = {
'resource_id': self.hash_table_id,
'filters': {'datastore_id': resource_id},
'fields': 'hash',
'limit': 1
}
err_msg = 'Resource `%s` was not found' % resource_id
alt_msg = 'Hash table `%s` was not found' % self.hash_table_id
try:
result = self.datastore_search(**kwargs)
resource_hash = result['records'][0]['hash']
except NotFound:
message = '%s in datastore!' % alt_msg
raise NotFound({'message': message, 'item': 'datastore'})
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound('%s in filestore.' % err_msg)
else:
raise err
except IndexError:
print('%s in hash table.' % err_msg)
resource_hash = None
if self.verbose:
print('Resource `%s` hash is `%s`.' % (resource_id, resource_hash))
return resource_hash
def fetch_resource(self, resource_id, user_agent=None, stream=True):
"""Fetches a single resource from filestore.
Args:
resource_id (str): The filestore resource id.
Kwargs:
user_agent (str): The user agent.
stream (bool): Stream content (default: True).
Returns:
obj: requests.Response object.
Raises:
NotFound: If unable to find the resource.
NotAuthorized: If access to fetch resource is denied.
Examples:
>>> CKAN(quiet=True).fetch_resource('rid')
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
user_agent = user_agent or self.user_agent
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
resource = self.resource_show(id=resource_id)
except NotFound:
raise NotFound(err_msg)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
url = resource.get('perma_link') or resource.get('url')
if self.verbose:
print('Downloading url %s...' % url)
headers = {'User-Agent': user_agent}
r = requests.get(url, stream=stream, headers=headers)
err_msg = 'Access to fetch resource %s was denied.' % resource_id
if any('403' in h.headers.get('x-ckan-error', '') for h in r.history):
raise NotAuthorized(err_msg)
elif r.status_code == 401:
raise NotAuthorized(err_msg)
else:
return r
def get_filestore_update_func(self, resource, **kwargs):
"""Returns the function to create or update a single resource on
filestore. To create a resource, you must supply either `url`,
`filepath`, or `fileobj`.
Args:
resource (dict): The resource passed to resource_create.
**kwargs: Keyword arguments that are passed to resource_create.
Kwargs:
url (str): New file url (for file link, requires `format`).
format (str): New file format (for file link, requires `url`).
fileobj (obj): New file like object (for file upload).
filepath (str): New file path (for file upload).
post (bool): Post data using requests instead of ckanapi.
name (str): The resource name.
description (str): The resource description.
hash (str): The resource hash.
Returns:
tuple: (func, args, data)
where func is `requests.post` if `post` option is specified,
`self.resource_create` otherwise. `args` and `data` should be
passed as *args and **kwargs respectively.
See also:
ckanutils._update_filestore
Examples:
>>> ckan = CKAN(quiet=True)
>>> resource = {
... 'name': 'name', 'package_id': 'pid', 'resource_id': 'rid',
... 'description': 'description', 'hash': 'hash'}
>>> kwargs = {'url': 'http://example.com/file', 'format': 'csv'}
>>> res = ckan.get_filestore_update_func(resource, **kwargs)
>>> func, args, kwargs = res
>>> func(*args, **kwargs)
Traceback (most recent call last):
NotFound: Not found
"""
post = kwargs.pop('post', None)
filepath = kwargs.pop('filepath', None)
fileobj = kwargs.pop('fileobj', None)
f = open(filepath, 'rb') if filepath else fileobj
resource.update(kwargs)
if post:
args = ['%s/api/action/resource_create' % self.address]
hdrs = {
'X-CKAN-API-Key': self.api_key, 'User-Agent': self.user_agent}
data = {'data': resource, 'headers': hdrs}
data.update({'files': {'upload': f}}) if f else None
func = requests.post
else:
args = []
resource.update({'upload': f}) if f else None
data = {
k: v for k, v in resource.items() if not isinstance(v, dict)}
func = self.resource_create
return (func, args, data)
def _update_filestore(self, func, *args, **kwargs):
"""Helps create or update a single resource on filestore.
To create a resource, you must supply either `url`, `filepath`, or
`fileobj`.
Args:
func (func): The resource passed to resource_create.
*args: Postional arguments that are passed to `func`
**kwargs: Keyword arguments that are passed to `func`.
Kwargs:
url (str): New file url (for file link).
fileobj (obj): New file like object (for file upload).
filepath (str): New file path (for file upload).
name (str): The resource name.
description (str): The resource description.
hash (str): The resource hash.
Returns:
obj: requests.Response object if `post` option is specified,
ckan resource object otherwise.
See also:
ckanutils.get_filestore_update_func
Examples:
>>> ckan = CKAN(quiet=True)
>>> url = 'http://example.com/file'
>>> resource = {'package_id': 'pid'}
>>> kwargs = {'name': 'name', 'url': url, 'format': 'csv'}
>>> res = ckan.get_filestore_update_func(resource, **kwargs)
>>> ckan._update_filestore(res[0], *res[1], **res[2])
Package `pid` was not found.
>>> resource['resource_id'] = 'rid'
>>> res = ckan.get_filestore_update_func(resource, **kwargs)
>>> ckan._update_filestore(res[0], *res[1], **res[2])
Resource `rid` was not found in filestore.
"""
data = kwargs.get('data', {})
files = kwargs.get('files', {})
resource_id = kwargs.get('resource_id', data.get('resource_id'))
package_id = kwargs.get('package_id', data.get('package_id'))
f = kwargs.get('upload', files.get('upload'))
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
r = func(*args, **kwargs) or {'id': None}
except NotFound:
pck_msg = 'Package `%s` was not found.' % package_id
print(err_msg if resource_id else pck_msg)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
print(err_msg)
r = None
else:
raise err
except requests.exceptions.ConnectionError as err:
if 'Broken pipe' in err.message[1]:
print('File size too large. Try uploading a smaller file.')
r = None
else:
raise err
else:
return r
finally:
f.close() if f else None
def create_resource(self, package_id, **kwargs):
"""Creates a single resource on filestore. You must supply either
`url`, `filepath`, or `fileobj`.
Args:
package_id (str): The filestore package id.
**kwargs: Keyword arguments that are passed to resource_create.
Kwargs:
url (str): New file url (for file link).
filepath (str): New file path (for file upload).
fileobj (obj): New file like object (for file upload).
post (bool): Post data using requests instead of ckanapi.
name (str): The resource name (defaults to the filename).
description (str): The resource description.
hash (str): The resource hash.
Returns:
obj: requests.Response object if `post` option is specified,
ckan resource object otherwise.
Raises:
TypeError: If neither `url`, `filepath`, nor `fileobj` are supplied.
Examples:
>>> ckan = CKAN(quiet=True)
>>> ckan.create_resource('pid')
Traceback (most recent call last):
TypeError: You must specify either a `url`, `filepath`, or `fileobj`
>>> ckan.create_resource('pid', url='http://example.com/file')
Package `pid` was not found.
"""
if not any(map(kwargs.get, ['url', 'filepath', 'fileobj'])):
raise TypeError(
'You must specify either a `url`, `filepath`, or `fileobj`')
path = filter(None, map(kwargs.get, ['url', 'filepath', 'fileobj']))[0]
try:
if 'docs.google.com' in path:
def_name = path.split('gid=')[1].split('&')[0]
else:
def_name = p.basename(path)
except AttributeError:
def_name = None
file_format = 'csv'
else:
# copy/pasted from utils... fix later
if 'format=' in path:
file_format = path.split('format=')[1].split('&')[0]
else:
file_format = p.splitext(path)[1].lstrip('.')
kwargs.setdefault('name', def_name)
# Will get `ckan.logic.ValidationError` if url isn't set
kwargs.setdefault('url', 'http://example.com')
kwargs['format'] = file_format
resource = {'package_id': package_id}
if self.verbose:
print('Creating new resource in package %s...' % package_id)
func, args, data = self.get_filestore_update_func(resource, **kwargs)
return self._update_filestore(func, *args, **data)
def update_filestore(self, resource_id, **kwargs):
"""Updates a single resource on filestore.
Args:
resource_id (str): The filestore resource id.
**kwargs: Keyword arguments that are passed to resource_create.
Kwargs:
url (str): New file url (for file link).
filepath (str): New file path (for file upload).
fileobj (obj): New file like object (for file upload).
post (bool): Post data using requests instead of ckanapi.
name (str): The resource name.
description (str): The resource description.
hash (str): The resource hash.
Returns:
obj: requests.Response object if `post` option is specified,
ckan resource object otherwise.
Examples:
>>> CKAN(quiet=True).update_filestore('rid')
Resource `rid` was not found in filestore.
"""
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
resource = self.resource_show(id=resource_id)
except NotFound:
print(err_msg)
return None
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
else:
resource['package_id'] = self.get_package_id(resource_id)
if self.verbose:
print('Updating resource %s...' % resource_id)
f, args, data = self.get_filestore_update_func(resource, **kwargs)
return self._update_filestore(f, *args, **data)
def update_datastore(self, resource_id, filepath, **kwargs):
verbose = not kwargs.get('quiet')
chunk_rows = kwargs.get('chunksize_rows')
primary_key = kwargs.get('primary_key')
content_type = kwargs.get('content_type')
type_cast = kwargs.get('type_cast')
method = 'upsert' if primary_key else 'insert'
keys = ['aliases', 'primary_key', 'indexes']
try:
extension = p.splitext(filepath)[1].split('.')[1]
except (IndexError, AttributeError):
# no file extension given, e.g., a tempfile
extension = cv.ctype2ext(content_type)
try:
reader = io.get_reader(extension)
except TypeError:
print('Error: plugin for extension `%s` not found!' % extension)
return False
else:
records = reader(filepath, **kwargs)
first = records.next()
keys = first.keys()
records = it.chain([first], records)
if type_cast:
records, results = pr.detect_types(records)
types = results['types']
casted_records = pr.type_cast(records, types)
else:
types = [{'id': key, 'type': 'text'} for key in keys]
casted_records = records
if verbose:
print('Parsed types:')
pprint(types)
create_kwargs = {k: v for k, v in kwargs.items() if k in keys}
if not primary_key:
self.delete_table(resource_id)
insert_kwargs = {'chunksize': chunk_rows, 'method': method}
self.create_table(resource_id, types, **create_kwargs)
args = [resource_id, casted_records]
return self.insert_records(*args, **insert_kwargs)
def find_ids(self, packages, **kwargs):
default = {'rid': '', 'pname': ''}
kwargs.update({'method': self.query, 'default': default})
return pr.find(packages, **kwargs)
def get_package_id(self, resource_id):
"""Gets the package id of a single resource on filestore.
Args:
resource_id (str): The filestore resource id.
Returns:
str: The package id.
Examples:
>>> CKAN(quiet=True).get_package_id('rid')
Resource `rid` was not found in filestore.
"""
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
resource = self.resource_show(id=resource_id)
except NotFound:
print(err_msg)
return None
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
else:
revision = self.revision_show(id=resource['revision_id'])
return revision['packages'][0]
def create_hash_table(self, verbose=False):
kwargs = {
'resource_id': self.hash_table_id,
'fields': [
{'id': 'datastore_id', 'type': 'text'},
{'id': 'hash', 'type': 'text'}],
'primary_key': 'datastore_id'
}
if verbose:
print('Creating hash table...')
self.create_table(**kwargs)
def update_hash_table(self, resource_id, resource_hash, verbose=False):
records = [{'datastore_id': resource_id, 'hash': resource_hash}]
if verbose:
print('Updating hash table...')
self.insert_records(self.hash_table_id, records, method='upsert')
def get_update_date(self, item):
timestamps = {
'revision_timestamp': 'revision',
'last_modified': 'resource',
'metadata_modified': 'package'
}
for key, value in timestamps.items():
if key in item:
timestamp = item[key]
item_type = value
break
else:
keys = timestamps.keys()
msg = 'None of the following keys found in item: %s' % keys
raise TypeError(msg)
if not timestamp and item_type == 'resource':
# print('Resource timestamp is empty. Querying revision.')
timestamp = self.revision_show(id=item['revision_id'])['timestamp']
return dt.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
def filter(self, items, tagged=None, named=None, updated=None):
for i in items:
if i['state'] != 'active':
continue
if updated and updated(self.get_update_date(i)):
yield i
continue
if named and named.lower() in i['name'].lower():
yield i
continue
tags = it.imap(itemgetter('name'), i['tags'])
is_tagged = tagged and 'tags' in i
if is_tagged and any(it.ifilter(lambda t: t == tagged, tags)):
yield i
continue
if not (named or tagged or updated):
yield i
def query(self, packages, **kwargs):
pkwargs = {
'named': kwargs.get('pnamed'),
'tagged': kwargs.get('ptagged')}
rkwargs = {
'named': kwargs.get('rnamed'),
'tagged': kwargs.get('rtagged')}
skwargs = {'key': self.get_update_date, 'reverse': True}
filtered_packages = self.filter(packages, **pkwargs)
for pack in sorted(filtered_packages, **skwargs):
package = self.package_show(id=pack['name'])
resources = self.filter(package['resources'], **rkwargs)
for resource in sorted(resources, **skwargs):
yield {'rid': resource['id'], 'pname': package['name']}
| reubano/ckanutils | ckanutils.py | Python | mit | 29,704 |
#!/usr/bin/env python
# coding: utf-8
from .interactiveapp import InteractiveApplication, ENCODING
class InteractiveLoopApplication(InteractiveApplication):

    def __init__(self, name, desc, version,
                 padding, margin, suffix, encoding=ENCODING):
        super(InteractiveLoopApplication, self).__init__(
            name, desc, version, padding, margin, suffix, encoding)
        # loop status
        self.STATUS_EXIT = 0
        self.STATUS_CONTINUE = 1

    def loop(self, func):
        def mainloop():
            loop_flag = self.STATUS_CONTINUE
            while loop_flag == self.STATUS_CONTINUE:
                try:
                    loop_flag = func()
                except KeyboardInterrupt:
                    self.write_error("Terminated.")
                    self.exit(0)
            self.exit(0)
        return mainloop
| alice1017/coadlib | coadlib/loopapp.py | Python | mit | 871 |
import re
import datetime
import time
#niru's git commit
while True:
    #open the file for reading
    file = open("test.txt")
    content = file.read()
    #Get timestamp
    ts = time.time()
    ist = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    #open file for read and close it neatly(wrap code in try/except)
    #with open('test.txt', 'r') as r:
    #    content = r.read()
    #print content
    #Search the entire content for '@' and replace it with time stamp.
    new_content = re.sub(r'@.*', ist, content)
    print new_content
    #open file for write and close it neatly(wrap code in try/except)
    with open('test.txt', 'w') as f:
        f.write(new_content)
    print "torpid loop complete"
    time.sleep(5)
| cloud-engineering/Torpid | main.py | Python | mit | 777 |
'''
This module corresponds to ARDroneLib/Soft/Common/navdata_common.h
'''
import ctypes
import functools
from pyardrone.utils.structure import Structure
uint8_t = ctypes.c_uint8
uint16_t = ctypes.c_uint16
uint32_t = ctypes.c_uint32
int16_t = ctypes.c_int16
int32_t = ctypes.c_int32
bool_t = ctypes.c_uint32 # ARDroneTool's bool is 4 bytes
char = ctypes.c_char
float32_t = ctypes.c_float
NB_GYROS = 3
NB_ACCS = 3
NB_NAVDATA_DETECTION_RESULTS = 4
NB_CORNER_TRACKERS_WIDTH = 5
NB_CORNER_TRACKERS_HEIGHT = 4
DEFAULT_NB_TRACKERS_WIDTH = NB_CORNER_TRACKERS_WIDTH + 1
DEFAULT_NB_TRACKERS_HEIGHT = NB_CORNER_TRACKERS_HEIGHT + 1
NAVDATA_MAX_CUSTOM_TIME_SAVE = 20
_vector31_t = float32_t * 3
_velocities_t = _vector31_t
_vector21_t = float32_t * 2
_screen_point_t = int32_t * 2
_matrix33_t = float32_t * 3 * 3
class OptionHeader(dict):

    def register(self, tag):
        return functools.partial(self._register, tag)

    def _register(self, tag, function):
        if tag in self:
            raise KeyError('Key {!r} conflict with existing item {}'.format(
                tag, self[tag]))
        self[tag] = function
        return function
index = OptionHeader()
class Metadata(Structure):
'''
Header of :py:class:`~pyardrone.navdata.NavData`.
Available via :py:class:`~pyardrone.navdata.NavData`.metadata
Corresponds to C struct ``navdata_t``.
'''
_pack_ = 1
_attrname = 'metadata'
header = uint32_t #: Should be 0x55667788
#: raw drone state,
#: see also: :py:class:`~pyardrone.navdata.states.DroneState`
state = uint32_t
sequence_number = uint32_t #:
vision_flag = uint32_t #:
class OptionHeader(Structure):
_pack_ = 1
tag = uint16_t
size = uint16_t
@index.register(0)
class Demo(OptionHeader):
'''
Minimal navigation data for all flights.
Corresponds to C struct ``navdata_demo_t``.
'''
_attrname = 'demo'
#: Flying state (landed, flying, hovering, etc.)
#: defined in CTRL_STATES enum.
ctrl_state = uint32_t
vbat_flying_percentage = uint32_t #: battery voltage filtered (mV)
theta = float32_t #: UAV's pitch in milli-degrees
phi = float32_t #: UAV's roll in milli-degrees
psi = float32_t #: UAV's yaw in milli-degrees
altitude = int32_t #: UAV's altitude in centimeters
vx = float32_t #: UAV's estimated linear velocity
vy = float32_t #: UAV's estimated linear velocity
vz = float32_t #: UAV's estimated linear velocity
#: streamed frame index Not used -> To integrate in video stage.
num_frames = uint32_t
# Camera parameters compute by detection
detection_camera_rot = _matrix33_t #: Deprecated ! Don't use !
detection_camera_trans = _vector31_t #: Deprecated ! Don't use !
detection_tag_index = uint32_t #: Deprecated ! Don't use !
detection_camera_type = uint32_t #: Type of tag searched in detection
# Camera parameters compute by drone
drone_camera_rot = _matrix33_t #: Deprecated ! Don't use !
drone_camera_trans = _vector31_t #: Deprecated ! Don't use !
@index.register(1)
class Time(OptionHeader):
'''
Timestamp
Corresponds to C struct ``navdata_time_t``.
'''
_attrname = 'time'
#: 32 bit value where the 11 most significant bits represents the seconds,
#: and the 21 least significant bits are the microseconds.
time = uint32_t
@index.register(2)
class RawMeasures(OptionHeader):
'''
Raw sensors measurements
Corresponds to C struct ``navdata_raw_measures_t``.
'''
_attrname = 'raw_measures'
# +12 bytes
raw_accs = uint16_t * NB_ACCS #: filtered accelerometers
raw_gyros = int16_t * NB_GYROS #: filtered gyrometers
raw_gyros_110 = int16_t * 2 #: gyrometers x/y 110 deg/s
vbat_raw = uint32_t #: battery voltage raw (mV)
us_debut_echo = uint16_t
us_fin_echo = uint16_t
us_association_echo = uint16_t
us_distance_echo = uint16_t
us_courbe_temps = uint16_t
us_courbe_valeur = uint16_t
us_courbe_ref = uint16_t
flag_echo_ini = uint16_t
# TODO: uint16_t frame_number from ARDrone_Magneto
nb_echo = uint16_t
sum_echo = uint32_t
alt_temp_raw = int32_t
gradient = int16_t
@index.register(21)
class PressureRaw(OptionHeader):
'Corresponds to C struct ``navdata_pressure_raw_t``.'
_attrname = 'pressure_raw'
up = int32_t
ut = int16_t
Temperature_meas = int32_t
Pression_meas = int32_t
@index.register(22)
class Magneto(OptionHeader):
'Corresponds to C struct ``navdata_magneto_t``.'
_attrname = 'magneto'
mx = int16_t
my = int16_t
mz = int16_t
magneto_raw = _vector31_t #: magneto in the body frame, in mG
magneto_rectified = _vector31_t
magneto_offset = _vector31_t
heading_unwrapped = float32_t
heading_gyro_unwrapped = float32_t
heading_fusion_unwrapped = float32_t
magneto_calibration_ok = char
magneto_state = uint32_t
magneto_radius = float32_t
error_mean = float32_t
error_var = float32_t
@index.register(23)
class WindSpeed(OptionHeader):
'Corresponds to C struct ``navdata_wind_speed_t``.'
_attrname = 'wind_speed'
wind_speed = float32_t #: estimated wind speed [m/s]
#: estimated wind direction in North-East frame [rad] e.g.
#: if wind_angle is pi/4, wind is from South-West to North-East
wind_angle = float32_t
wind_compensation_theta = float32_t
wind_compensation_phi = float32_t
state_x1 = float32_t
state_x2 = float32_t
state_x3 = float32_t
state_x4 = float32_t
state_x5 = float32_t
state_x6 = float32_t
magneto_debug1 = float32_t
magneto_debug2 = float32_t
magneto_debug3 = float32_t
@index.register(24)
class KalmanPressure(OptionHeader):
'Corresponds to C struct ``navdata_kalman_pressure_t``.'
_attrname = 'kalman_pressure'
offset_pressure = float32_t
est_z = float32_t
est_zdot = float32_t
est_bias_PWM = float32_t
est_biais_pression = float32_t
offset_US = float32_t
prediction_US = float32_t
cov_alt = float32_t
cov_PWM = float32_t
cov_vitesse = float32_t
bool_effet_sol = bool_t
somme_inno = float32_t
flag_rejet_US = bool_t
u_multisinus = float32_t
gaz_altitude = float32_t
Flag_multisinus = bool_t
Flag_multisinus_debut = bool_t
@index.register(27)
class Zimmu3000(OptionHeader):
'Corresponds to C struct ``navdata_zimmu_3000_t``.'
_attrname = 'zimmu_3000'
vzimmuLSB = int32_t
vzfind = float32_t
@index.register(3)
class PhysMeasures(OptionHeader):
'Corresponds to C struct ``navdata_phys_measures_t``.'
_attrname = 'phys_measures'
accs_temp = float32_t
gyro_temp = uint16_t
phys_accs = float32_t * NB_ACCS
phys_gyros = float32_t * NB_GYROS
alim3V3 = uint32_t #: 3.3volt alim [LSB]
vrefEpson = uint32_t #: ref volt Epson gyro [LSB]
vrefIDG = uint32_t #: ref volt IDG gyro [LSB]
@index.register(4)
class GyrosOffsets(OptionHeader):
'Corresponds to C struct ``navdata_gyros_offsets_t``.'
_attrname = 'gyros_offsets'
offset_g = float32_t * NB_GYROS
@index.register(5)
class EulerAngles(OptionHeader):
'Corresponds to C struct ``navdata_euler_angles_t``.'
_attrname = 'eular_angles'
theta_a = float32_t
phi_a = float32_t
@index.register(6)
class References(OptionHeader):
'Corresponds to C struct ``navdata_references_t``.'
_attrname = 'references'
ref_theta = int32_t
ref_phi = int32_t
ref_theta_I = int32_t
ref_phi_I = int32_t
ref_pitch = int32_t
ref_roll = int32_t
ref_yaw = int32_t
ref_psi = int32_t
vx_ref = float32_t
vy_ref = float32_t
theta_mod = float32_t
phi_mod = float32_t
k_v_x = float32_t
k_v_y = float32_t
k_mode = uint32_t
ui_time = float32_t
ui_theta = float32_t
ui_phi = float32_t
ui_psi = float32_t
ui_psi_accuracy = float32_t
ui_seq = int32_t
@index.register(7)
class Trims(OptionHeader):
'Corresponds to C struct ``navdata_trims_t``.'
_attrname = 'trims'
angular_rates_trim_r = float32_t
euler_angles_trim_theta = float32_t
euler_angles_trim_phi = float32_t
@index.register(8)
class RcReferences(OptionHeader):
'Corresponds to C struct ``navdata_rc_references_t``.'
_attrname = 'rc_references'
rc_ref_pitch = int32_t
rc_ref_roll = int32_t
rc_ref_yaw = int32_t
rc_ref_gaz = int32_t
rc_ref_ag = int32_t
@index.register(9)
class Pwm(OptionHeader):
'Corresponds to C struct ``navdata_pwm_t``.'
_attrname = 'pwm'
motor1 = uint8_t
motor2 = uint8_t
motor3 = uint8_t
motor4 = uint8_t
sat_motor1 = uint8_t
sat_motor2 = uint8_t
sat_motor3 = uint8_t
sat_motor4 = uint8_t
gaz_feed_forward = float32_t
gaz_altitude = float32_t
altitude_integral = float32_t
vz_ref = float32_t
u_pitch = int32_t
u_roll = int32_t
u_yaw = int32_t
yaw_u_I = float32_t
u_pitch_planif = int32_t
u_roll_planif = int32_t
u_yaw_planif = int32_t
u_gaz_planif = float32_t
current_motor1 = uint16_t
current_motor2 = uint16_t
current_motor3 = uint16_t
current_motor4 = uint16_t
# WARNING: new navdata (FC 26/07/2011)
altitude_prop = float32_t
altitude_der = float32_t
@index.register(10)
class Altitude(OptionHeader):
'Corresponds to C struct ``navdata_altitude_t``.'
_attrname = 'altitude'
altitude_vision = int32_t
altitude_vz = float32_t
altitude_ref = int32_t
altitude_raw = int32_t
obs_accZ = float32_t
obs_alt = float32_t
obs_x = _vector31_t
obs_state = uint32_t
est_vb = _vector21_t
est_state = uint32_t
@index.register(11)
class VisionRaw(OptionHeader):
'Corresponds to C struct ``navdata_vision_raw_t``.'
_attrname = 'vision_raw'
vision_tx_raw = float32_t
vision_ty_raw = float32_t
vision_tz_raw = float32_t
@index.register(13)
class Vision(OptionHeader):
'Corresponds to C struct ``navdata_vision_t``.'
_attrname = 'vision'
vision_state = uint32_t
vision_misc = int32_t
vision_phi_trim = float32_t
vision_phi_ref_prop = float32_t
vision_theta_trim = float32_t
vision_theta_ref_prop = float32_t
new_raw_picture = int32_t
theta_capture = float32_t
phi_capture = float32_t
psi_capture = float32_t
altitude_capture = int32_t
time_capture = uint32_t #: time in TSECDEC format (see config.h)
body_v = _velocities_t
delta_phi = float32_t
delta_theta = float32_t
delta_psi = float32_t
gold_defined = uint32_t
gold_reset = uint32_t
gold_x = float32_t
gold_y = float32_t
@index.register(14)
class VisionPerf(OptionHeader):
'Corresponds to C struct ``navdata_vision_perf_t``.'
_attrname = 'vision_perf'
time_szo = float32_t
time_corners = float32_t
time_compute = float32_t
time_tracking = float32_t
time_trans = float32_t
time_update = float32_t
time_custom = float32_t * NAVDATA_MAX_CUSTOM_TIME_SAVE
@index.register(15)
class TrackersSend(OptionHeader):
'Corresponds to C struct ``navdata_trackers_send_t``.'
_attrname = 'trackers_send'
locked = int32_t * (DEFAULT_NB_TRACKERS_WIDTH * DEFAULT_NB_TRACKERS_HEIGHT)
point = _screen_point_t * (
DEFAULT_NB_TRACKERS_WIDTH * DEFAULT_NB_TRACKERS_HEIGHT
)
@index.register(16)
class VisionDetect(OptionHeader):
'Corresponds to C struct ``navdata_vision_detect_t``.'
# Change the function 'navdata_server_reset_vision_detect()'
# if this structure is modified
_attrname = 'vision_detect'
nb_detected = uint32_t
type = uint32_t * NB_NAVDATA_DETECTION_RESULTS
xc = uint32_t * NB_NAVDATA_DETECTION_RESULTS
yc = uint32_t * NB_NAVDATA_DETECTION_RESULTS
width = uint32_t * NB_NAVDATA_DETECTION_RESULTS
height = uint32_t * NB_NAVDATA_DETECTION_RESULTS
dist = uint32_t * NB_NAVDATA_DETECTION_RESULTS
orientation_angle = float32_t * NB_NAVDATA_DETECTION_RESULTS
rotation = _matrix33_t * NB_NAVDATA_DETECTION_RESULTS
translation = _vector31_t * NB_NAVDATA_DETECTION_RESULTS
camera_source = uint32_t * NB_NAVDATA_DETECTION_RESULTS
@index.register(12)
class VisionOf(OptionHeader):
'Corresponds to C struct ``navdata_vision_of_t``.'
_attrname = 'vision_of'
of_dx = float32_t * 5
of_dy = float32_t * 5
@index.register(17)
class Watchdog(OptionHeader):
'Corresponds to C struct ``navdata_watchdog_t``.'
_attrname = 'watchdog'
# +4 bytes
watchdog = int32_t
@index.register(18)
class AdcDataFrame(OptionHeader):
'Corresponds to C struct ``navdata_adc_data_frame_t``.'
_attrname = 'adc_data_frame'
version = uint32_t
data_frame = uint8_t * 32
@index.register(19)
class VideoStream(OptionHeader):
'Corresponds to C struct ``navdata_video_stream_t``.'
_attrname = 'video_stream'
quant = uint8_t #: quantizer reference used to encode frame [1:31]
frame_size = uint32_t #: frame size (bytes)
frame_number = uint32_t #: frame index
    atcmd_ref_seq = uint32_t  #: atcmd ref sequence number
#: mean time between two consecutive atcmd_ref (ms)
atcmd_mean_ref_gap = uint32_t
atcmd_var_ref_gap = float32_t
atcmd_ref_quality = uint32_t #: estimator of atcmd link quality
# drone2
#: measured out throughput from the video tcp socket
out_bitrate = uint32_t
#: last frame size generated by the video encoder
desired_bitrate = uint32_t
# misc temporary data
data1 = int32_t
data2 = int32_t
data3 = int32_t
data4 = int32_t
data5 = int32_t
# queue usage
tcp_queue_level = uint32_t
fifo_queue_level = uint32_t
@index.register(25)
class HdvideoStream(OptionHeader):
'Corresponds to C struct ``navdata_hdvideo_stream_t``.'
_attrname = 'hdvideo_stream'
hdvideo_state = uint32_t
storage_fifo_nb_packets = uint32_t
storage_fifo_size = uint32_t
usbkey_size = uint32_t #: USB key in kbytes - 0 if no key present
#: USB key free space in kbytes - 0 if no key present
usbkey_freespace = uint32_t
#: 'frame_number' PaVE field of the frame starting to be encoded for the
#: HD stream
frame_number = uint32_t
usbkey_remaining_time = uint32_t #: time in seconds
@index.register(20)
class Games(OptionHeader):
'Corresponds to C struct ``navdata_games_t``.'
_attrname = 'games'
double_tap_counter = uint32_t
finish_line_counter = uint32_t
@index.register(26)
class Wifi(OptionHeader):
'Corresponds to C struct ``navdata_wifi_t``.'
_attrname = 'wifi'
link_quality = uint32_t
@index.register(0xFFFF)
class Cks(OptionHeader):
'Corresponds to C struct ``navdata_cks_t``.'
_attrname = 'cks'
value = uint32_t #: Value of the checksum
| afg984/pyardrone | pyardrone/navdata/options.py | Python | mit | 14,941 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import scipy
from sklearn import preprocessing
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from collections import Counter
from scipy.stats.stats import pearsonr
import data_readers
import feature_extractors as fe
import label_transformers as lt
import training_functions as training
import utils
def build_dataset(reader, phi_list, class_func, vectorizer=None, verbose=False):
"""Core general function for building experimental
hand-generated feature datasets.
Parameters
----------
reader : iterator
Should follow the format of data_readers. This is the dataset
we'll be featurizing.
    phi_list : list of feature functions
        Any function that takes a paragraph and its parse as input and
        returns a bool/int/float-valued dict as output.
class_func : function on the labels
A function that modifies the labels based on the experimental
design. If `class_func` returns None for a label, then that
item is ignored.
vectorizer : sklearn.feature_extraction.DictVectorizer
If this is None, then a new `DictVectorizer` is created and
used to turn the list of dicts created by `phi` into a
feature matrix. This happens when we are training.
If this is not None, then it's assumed to be a `DictVectorizer`
and used to transform the list of dicts. This happens in
assessment, when we take in new instances and need to
featurize them as we did in training.
Returns
-------
dict
A dict with keys 'X' (the feature matrix), 'y' (the list of
labels), 'vectorizer' (the `DictVectorizer`), and
'raw_examples' (the example strings, for error analysis).
"""
labels = []
feat_dicts = []
raw_examples = []
rows = []
for i, (paragraph, parse, label) in enumerate(reader()):
if i % 100 == 0:
print " Starting feature extraction for unit #%d " % (i+1)
cls = class_func(label)
#print label, cls
        if cls is not None:
labels.append(cls)
raw_examples.append(paragraph)
if verbose:
print cls, ":", paragraph
features = Counter()
for phi in phi_list:
cur_feats = phi(paragraph, parse)
if cur_feats is None:
continue
# If we won't accidentally blow away data, merge 'em.
overlap_feature_names = features.viewkeys() & cur_feats.viewkeys()
if verbose and len(overlap_feature_names) > 0:
print "Note: Overlap features are ", overlap_feature_names
features |= cur_feats
rows.append(cur_feats['row'])
feat_dicts.append(features)
if verbose:
print features
print
print "Completed all feature extraction: %d units" % (i+1)
# In training, we want a new vectorizer, but in
# assessment, we featurize using the existing vectorizer:
feat_matrix = None
    if vectorizer is None:
vectorizer = DictVectorizer(sparse=True)
feat_matrix = vectorizer.fit_transform(feat_dicts)
else:
feat_matrix = vectorizer.transform(feat_dicts)
return {'X': feat_matrix,
'y': labels,
'vectorizer': vectorizer,
'raw_examples': raw_examples}
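# --- Illustrative sketch (not part of the original module) ---
# Shows the train/assess pairing the docstring above describes: fit the
# DictVectorizer once on training data, then reuse it so assessment rows map
# onto the same feature columns. The reader/feature/label arguments are
# whatever the caller already uses with build_dataset.
def _example_build_train_and_assess(train_reader, assess_reader, phi_list, class_func):
    train = build_dataset(train_reader, phi_list, class_func, vectorizer=None)
    assess = build_dataset(assess_reader, phi_list, class_func,
                           vectorizer=train['vectorizer'])
    return train['X'], train['y'], assess['X'], assess['y']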
def experiment_features(
train_reader=data_readers.toy,
assess_reader=None,
train_size=0.7,
phi_list=[fe.manual_content_flags],
class_func=lt.identity_class_func,
train_func=training.fit_logistic_at_with_crossvalidation,
score_func=scipy.stats.stats.pearsonr,
verbose=True):
"""Generic experimental framework for hand-crafted features.
Either assesses with a random train/test split of `train_reader`
or with `assess_reader` if it is given.
Parameters
----------
train_reader : data iterator (default: `train_reader`)
Iterator for training data.
assess_reader : iterator or None (default: None)
If None, then the data from `train_reader` are split into
        a random train/test split, with the train percentage
determined by `train_size`. If not None, then this should
be an iterator for assessment data (e.g., `dev_reader`).
train_size : float (default: 0.7)
If `assess_reader` is None, then this is the percentage of
`train_reader` devoted to training. If `assess_reader` is
not None, then this value is ignored.
phi_list : array of feature functions (default: [`manual_content_flags`])
        Any function that takes a paragraph and its parse as input and
        returns a bool/int/float-valued dict as output.
class_func : function on the labels
A function that modifies the labels based on the experimental
design. If `class_func` returns None for a label, then that
item is ignored.
train_func : model wrapper (default: `fit_logistic_at_with_crossvalidation`)
Any function that takes a feature matrix and a label list
as its values and returns a fitted model with a `predict`
function that operates on feature matrices.
    score_func : scoring function (default: `scipy.stats.stats.pearsonr`)
        Any function that maps a (truth, prediction) pair of label lists to
        a score; the default is the Pearson correlation.
verbose : bool (default: True)
Whether to print out the model assessment to standard output.
Prints
-------
To standard output, if `verbose=True`
Model confusion matrix and a model precision/recall/F1 report.
Returns
-------
float
The overall scoring metric for assess set as determined by `score_metric`.
float
The overall Cronbach's alpha for assess set
np.array
The confusion matrix (rows are truth, columns are predictions)
list of dictionaries
A list of {truth:_ , prediction:_, example:_} dicts on the assessment data
"""
# Train dataset:
train = build_dataset(train_reader, phi_list, class_func, vectorizer=None, verbose=verbose)
# Manage the assessment set-up:
indices = np.arange(0, len(train['y']))
X_train = train['X']
y_train = np.array(train['y'])
train_examples = np.array(train['raw_examples'])
X_assess = None
y_assess = None
assess_examples = None
    if assess_reader is None:
print " Raw y training distribution:"
print " ", np.bincount(y_train)[1:]
indices_train, indices_assess, y_train, y_assess = train_test_split(
indices, y_train, train_size=train_size, stratify=y_train)
X_assess = X_train[indices_assess]
assess_examples = train_examples[indices_assess]
X_train = X_train[indices_train]
train_examples = train_examples[indices_train]
print " Train y distribution:"
print " ", np.bincount(y_train)[1:]
print " Test y distribution:"
print " ", np.bincount(y_assess)[1:]
else:
assess = build_dataset(
assess_reader,
phi_list,
class_func,
vectorizer=train['vectorizer'])
X_assess, y_assess, assess_examples = assess['X'], assess['y'], np.array(assess['raw_examples'])
# Normalize:
nonzero_cells = len(X_train.nonzero()[0])
total_cells = 1.*X_train.shape[0] * X_train.shape[1]
proportion_nonzero = nonzero_cells/total_cells
print "sparsity: %g/1 are nonzero" % proportion_nonzero
if proportion_nonzero > 0.5: # if dense matrix
X_train = X_train.toarray()
X_assess = X_assess.toarray()
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_assess = scaler.transform(X_assess)
else:
scaler = preprocessing.MaxAbsScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_assess = scaler.transform(X_assess)
# Train:
mod = train_func(X_train, y_train)
# Predictions:
predictions_on_assess = mod.predict(X_assess)
assess_performance = get_score_example_pairs(y_assess, predictions_on_assess, assess_examples)
predictions_on_train = mod.predict(X_train)
train_performance = get_score_example_pairs(y_train, predictions_on_train, train_examples)
# Report:
if verbose:
print "\n-- TRAINING RESULTS --"
print_verbose_overview(y_train, predictions_on_train)
print "\n-- ASSESSMENT RESULTS --"
print_verbose_overview(y_assess, predictions_on_assess)
try:
the_score = score_func(y_assess, predictions_on_assess)
except:
the_score = (0,0)
# Return the overall results on the assessment data:
return the_score, \
utils.cronbach_alpha(y_assess, predictions_on_assess), \
confusion_matrix(y_assess, predictions_on_assess), \
assess_performance
def get_score_example_pairs(y, y_hat, examples):
""" Return a list of dicts: {truth score, predicted score, example} """
paired_results = sorted(zip(y, y_hat), key=lambda x: x[0]-x[1])
performance = []
for i, (truth, prediction) in enumerate(paired_results):
performance.append({"truth": truth, "prediction": prediction, "example": examples[i]})
return performance
def print_verbose_overview(y, yhat):
""" Print a performance overview """
print "Correlation: ", pearsonr(y, yhat)[0]
print "Alpha: ", utils.cronbach_alpha(y, yhat)
print "Classification report:"
print classification_report(y, yhat, digits=3)
print "Confusion matrix:"
print confusion_matrix(y, yhat)
print " (Rows are truth; columns are predictions)"
def experiment_features_iterated(
train_reader=data_readers.toy,
assess_reader=None,
train_size=0.7,
phi_list=[fe.manual_content_flags],
class_func=lt.identity_class_func,
train_func=training.fit_logistic_at_with_crossvalidation,
score_func=utils.safe_weighted_f1,
verbose=True,
iterations=1):
"""
Generic iterated experimental framework for hand-crafted features.
"""
correlation_overall = []
cronbach_overall = []
conf_matrix_overall = None
assess_performance = []
while len(correlation_overall) < iterations:
print "\nStarting iteration: %d/%d" % (len(correlation_overall)+1, iterations)
try:
correlation_local, cronbach_local, conf_matrix_local, perf_local = experiment_features(
train_reader=train_reader,
assess_reader=assess_reader,
train_size=train_size,
phi_list=phi_list,
class_func=class_func,
train_func=train_func,
score_func=score_func,
verbose=verbose)
correlation_overall.append(correlation_local[0])
cronbach_overall.append(cronbach_local)
assess_performance.extend(perf_local)
if conf_matrix_overall is None:
conf_matrix_overall = conf_matrix_local
else:
conf_matrix_overall += conf_matrix_local
except (ValueError,UserWarning) as e:
print e
if verbose:
print "\n-- OVERALL --"
print correlation_overall
print cronbach_overall
print conf_matrix_overall
return correlation_overall, cronbach_overall, conf_matrix_overall, assess_performance
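# --- Illustrative sketch (not part of the original module) ---
# A minimal way to drive experiment_features() using only this module's own
# defaults (toy reader, manual_content_flags features, identity label
# transform, the default training function, Pearson correlation scorer).
def _example_run():
    score, alpha, confusion, per_example = experiment_features(
        train_reader=data_readers.toy,
        phi_list=[fe.manual_content_flags],
        class_func=lt.identity_class_func,
        train_func=training.fit_logistic_at_with_crossvalidation,
        score_func=pearsonr,
        verbose=False)
    return score, alpha, confusion, per_example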
| ptoman/icgauge | icgauge/experiment_frameworks.py | Python | mit | 11,989 |
import sys, os, fabric
class PiServicePolicies:
@staticmethod
def is_local():
return (not fabric.api.env.hosts or fabric.api.env.hosts[0] in ['localhost', '127.0.0.1', '::1'])
@staticmethod
def is_pi():
return os.path.isdir('/home/pi')
@staticmethod
def check_local_or_exit():
if not PiServicePolicies.is_local():
print "...only callable on localhost!!!"
sys.exit(-1)
@staticmethod
def check_remote_or_exit():
if PiServicePolicies.is_local():
print "...only callable on remote host!!!"
sys.exit(-1)
def check_installed_or_exit(self):
if not PiServicePolicies.installed(self):
print "...first you have to install this service! fab pi %s:install"
sys.exit(-1)
def installed(self):
ret = self.file_exists('__init__.py')
if not ret: print self.name+' not installed'
return ret
| creative-workflow/pi-setup | lib/piservices/policies.py | Python | mit | 867 |
import cv2, numpy as np
from dolphintracker.singlecam_tracker.camera_filter.FindDolphin import SearchBlobs
from dolphintracker.singlecam_tracker.camera_filter.BackGroundDetector import BackGroundDetector
import datetime
class PoolCamera(object):
def __init__(self, videofile, name, scene, maskObjectsNames, filters, frames_range=None):
self.name = name
self.videoCap = cv2.VideoCapture(videofile)
self.scene = scene
self.filters = filters
self.mask = self.create_mask(maskObjectsNames)
self.frames_range = frames_range
self._searchblobs = SearchBlobs()
self._backgrounds = []
self._last_centroid = None
if self.frames_range is not None:
self.videoCap.set(cv2.CAP_PROP_POS_FRAMES, self.frames_range[0])
print('set first frame', self.frames_range)
self._total_frames = self.videoCap.get(7)
self._colors = [(255,0,0),(0,255,0),(0,0,255)]
def create_mask(self, objectsNames):
mask = np.zeros((self.img_height,self.img_width), np.uint8)
for objname in objectsNames:
obj = self.scene.getObject(objname)
ptsProjection = self.points_projection( [p for p in obj.points if p[2]<0.2] )
hull = cv2.convexHull(np.int32(ptsProjection))
cv2.fillPoly(mask, np.int32([hull]), 255)
return mask
def read(self):
res, self.frame = self.videoCap.read()
if res:
self.originalFrame = self.frame.copy()
else:
self.originalFrame = None
return res
def process(self):
if len(self._backgrounds)==0:
for i, colorFilter in enumerate(self.filters):
firstFrame = self.frame_index
bgDetector = BackGroundDetector(capture=self.videoCap, filterFunction=colorFilter.process)
print('Background detection parameters', self._total_frames*0.04, self._total_frames*0.03)
last_frame = self.frames_range[1] if self.frames_range is not None else None
bg = bgDetector.detect(int(self._total_frames*0.04), int(self._total_frames*0.03), 180, last_frame)
bg = cv2.dilate( bg, kernel=cv2.getStructuringElement( cv2.MORPH_RECT, (5,5) ), iterations=2 )
bg = 255-bg
bg[bg<255]=0
self._backgrounds.append( cv2.bitwise_and(bg, self.mask) )
self.frame_index = firstFrame
result = []
for i, colorFilter in enumerate(self.filters):
filterResult = colorFilter.filter(self.frame, self._backgrounds[i])
blobs = self._searchblobs.process(filterResult)
res = blobs[0] if len(blobs)>=1 else None
result.append(res)
return result
def create_empty_mask(self): return np.zeros( (self.img_height, self.img_width), np.uint8 )
def points_projection(self, points): cam = self.scene_camera; return [cam.calcPixel(*p) for p in points]
@property
def scene_camera(self): return self.scene.getCamera(self.name)
@property
def img_width(self): return int( self.videoCap.get(cv2.CAP_PROP_FRAME_WIDTH) )
@property
def img_height(self): return int( self.videoCap.get(cv2.CAP_PROP_FRAME_HEIGHT) )
@property
def fps(self): return int( self.videoCap.get(cv2.CAP_PROP_FPS) )
@property
def frame_index(self): return int( self.videoCap.get(cv2.CAP_PROP_POS_FRAMES) )
@frame_index.setter
def frame_index(self, value): self.videoCap.set(cv2.CAP_PROP_POS_FRAMES, value)
@property
def currentTime(self):
milli = self.videoCap.get(cv2.CAP_PROP_POS_MSEC)
return datetime.timedelta(milliseconds=milli)
@property
def totalFrames(self): return self.videoCap.get(cv2.CAP_PROP_FRAME_COUNT) | UmSenhorQualquer/d-track | dolphintracker/singlecam_tracker/pool_camera.py | Python | mit | 3,437 |
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: qb_docker_image
short_description: QB extension of Ansible's `docker_image` module.
description:
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
image into a repository and archiving an image to a .tar file.
options:
archive_path:
description:
- Use with state C(present) to archive an image to a .tar file.
required: false
version_added: "2.1"
load_path:
description:
- Use with state C(present) to load an image from a .tar file.
required: false
version_added: "2.2"
dockerfile:
description:
- Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image.
default: Dockerfile
required: false
version_added: "2.0"
force:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state
C(present) to build, load or pull an image when the image already exists.
default: false
required: false
version_added: "2.1"
type: bool
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
required: false
version_added: "2.1"
name:
description:
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
required: true
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
aliases:
- build_path
required: false
pull:
description:
      - When building an image, download any updates to the FROM image in the Dockerfile.
default: true
required: false
version_added: "2.1"
type: bool
push:
description:
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
default: false
required: false
version_added: "2.2"
type: bool
rm:
description:
- Remove intermediate containers after build.
default: true
required: false
version_added: "2.1"
type: bool
nocache:
description:
- Do not use cache when building an image.
default: false
required: false
type: bool
repository:
description:
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
required: false
version_added: "2.1"
state:
description:
- Make assertions about the state of an image.
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
matching the provided name.
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
force option is used, the image will either be pulled, built or loaded. By default the image will be pulled
from Docker Hub. To build the image, provide a path value set to a directory containing a context and
Dockerfile. To load an image, specify load_path to provide a path to an archive file. To tag an image to a
repository, provide a repository path. If the name contains a repository path, it will be pushed.
- "NOTE: C(build) is DEPRECATED and will be removed in release 2.3. Specifying C(build) will behave the
same as C(present)."
required: false
default: present
choices:
- absent
- present
- build
tag:
description:
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
I(latest).
- If C(name) parameter format is I(name:tag), then tag value from C(name) will take precedence.
default: latest
required: false
buildargs:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21 and docker-py >= 1.7.0.
required: false
version_added: "2.2"
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
required: false
version_added: "2.1"
suboptions:
memory:
description:
- Set memory limit for build.
memswap:
description:
- Total memory (memory + swap), -1 to disable swap.
cpushares:
description:
- CPU shares (relative weight).
cpusetcpus:
description:
- CPUs in which to allow execution, e.g., "0-3", "0,1".
use_tls:
description:
- "DEPRECATED. Whether to use tls to connect to the docker server. Set to C(no) when TLS will not be used. Set to
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that the server's certificate is valid for the
server. NOTE: If you specify this option, it will set the value of the tls or tls_verify parameters."
choices:
- no
- encrypt
- verify
default: no
required: false
version_added: "2.0"
try_to_pull:
description:
- Try to pull the image before building. Added by QB.
choices:
- yes
- no
default: yes
required: false
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
author:
- Pavel Antonov (@softzilla)
- Chris Houseknecht (@chouseknecht)
- James Tanner (@jctanner)
'''
EXAMPLES = '''
- name: pull an image
docker_image:
name: pacur/centos-7
- name: Tag and push to docker hub
docker_image:
name: pacur/centos-7
repository: dcoppenhagan/myimage
tag: 7.0
push: yes
- name: Tag and push to local registry
docker_image:
name: centos
repository: localhost:5000/centos
tag: 7
push: yes
- name: Remove image
docker_image:
state: absent
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
- name: Build an image and push it to a private repo
docker_image:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
push: yes
- name: Archive image
docker_image:
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
archive_path: my_sinatra.tar
- name: Load image from archive and push to a private registry
docker_image:
name: localhost:5000/myimages/sinatra
tag: v1
push: yes
load_path: my_sinatra.tar
- name: Build image and with buildargs
docker_image:
path: /path/to/build/dir
name: myimage
buildargs:
log_volume: /var/log/myapp
listen_port: 8080
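# A hedged, illustrative example of the QB-specific option documented above;
# it assumes the module is invoked under its own name, qb_docker_image.
- name: Build an image without first trying to pull it
  qb_docker_image:
    path: /path/to/build/dir
    name: myimage
    try_to_pull: no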
'''
RETURN = '''
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
'''
import os
import re
import json
import socket
import threading
import logging
from ansible.module_utils.docker_common import HAS_DOCKER_PY_2, AnsibleDockerClient, DockerBaseClass
from ansible.module_utils._text import to_native
try:
if HAS_DOCKER_PY_2:
from docker.auth import resolve_repository_name
else:
from docker.auth.auth import resolve_repository_name
from docker.utils.utils import parse_repository_tag
except ImportError:
# missing docker-py handled in docker_common
pass
import qb.ipc.stdio
import qb.ipc.stdio.logging_
# from qb.ipc.stdio import client as io_client, logging_ as stdio_logging
# import qb.ipc.stdio.logging_
# from qb.ipc.stdio import
logger = qb.ipc.stdio.logging_.getLogger('qb_docker_image')
class QBAnsibleDockerClient( AnsibleDockerClient ):
def try_pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("(Try) Pulling image %s:%s" % (name, tag))
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
return None
except Exception as exc:
self.log("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
return None
return self.find_image(name=name, tag=tag)
def log(self, msg, pretty_print=False):
qb_log(msg)
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.archive_path = parameters.get('archive_path')
self.container_limits = parameters.get('container_limits')
self.dockerfile = parameters.get('dockerfile')
self.force = parameters.get('force')
self.load_path = parameters.get('load_path')
self.name = parameters.get('name')
self.nocache = parameters.get('nocache')
self.path = parameters.get('path')
self.pull = parameters.get('pull')
self.repository = parameters.get('repository')
self.rm = parameters.get('rm')
self.state = parameters.get('state')
self.tag = parameters.get('tag')
self.http_timeout = parameters.get('http_timeout')
self.push = parameters.get('push')
self.buildargs = parameters.get('buildargs')
# QB additions
self.try_to_pull = parameters.get('try_to_pull')
self.logger = qb.ipc.stdio.logging_.getLogger(
'qb_docker_image:ImageManager',
level = logging.DEBUG
)
# If name contains a tag, it takes precedence over tag parameter.
repo, repo_tag = parse_repository_tag(self.name)
if repo_tag:
self.name = repo
self.tag = repo_tag
if self.state in ['present', 'build']:
self.present()
elif self.state == 'absent':
self.absent()
def fail(self, msg):
self.client.fail(msg)
def present(self):
'''
Handles state = 'present', which includes building, loading or pulling
an image, depending on user provided parameters.
:returns None
'''
self.logger.info("Starting state=present...")
image = self.client.find_image(name=self.name, tag=self.tag)
pulled_image = None
if not image or self.force:
if self.try_to_pull:
self.log("Try to pull the image")
self.results['actions'].append(
'Tried to pull image %s:%s' % (self.name, self.tag)
)
self.results['changed'] = True
if not self.check_mode:
pulled_image = self.client.try_pull_image(self.name, tag=self.tag)
if pulled_image:
self.results['actions'].append(
'Pulled image %s:%s' % (self.name, self.tag)
)
self.results['image'] = pulled_image
if pulled_image is None:
if self.path:
# Build the image
if not os.path.isdir(self.path):
self.fail("Requested build path %s could not be found or you do not have access." % self.path)
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.log("Building image %s" % image_name)
self.results['actions'].append("Built image %s from %s" % (image_name, self.path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.build_image()
elif self.load_path:
# Load the image from an archive
if not os.path.isfile(self.load_path):
self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
self.load_path))
image_name = self.name
if self.tag:
image_name = "%s:%s" % (self.name, self.tag)
self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.load_image()
else:
# pull the image
self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
self.results['changed'] = True
if not self.check_mode:
self.results['image'] = self.client.pull_image(self.name, tag=self.tag)
if image and image == self.results['image']:
self.results['changed'] = False
if self.archive_path:
self.archive_image(self.name, self.tag)
        # Only push if:
        #
        # 1. We didn't pull the image (if we did pull it we have no need to
        #    then push it).
        # 2. We have a local image or a resulting image (otherwise there is
        #    nothing to push).
        # 3. Either:
        #    A. We didn't find any image before doing anything, or
        #    B. The resulting image differs from the one we started with.
        self.logger.debug("Deciding to push...")
        if (
            pulled_image is None and (
                image or self.results['image']
            ) and (
                ((not image) and self.results['image']) or
                (image and self.results['image'] and
                    image['Id'] != self.results['image']['Id'])
            )
        ):
self.logger.debug("Into push section!")
# self.log("have_image: {}".format(have_image))
# self.log("image_is_different: {}".format(image_is_different))
self.logger.debug("Image", image)
# if self.push and not self.repository:
# self.push_image(self.name, self.tag)
# elif self.repository:
# self.tag_image(
# self.name,
# self.tag,
# self.repository,
# force=self.force,
# push=self.push
# )
def absent(self):
'''
Handles state = 'absent', which removes an image.
:return None
'''
image = self.client.find_image(self.name, self.tag)
if image:
name = self.name
if self.tag:
name = "%s:%s" % (self.name, self.tag)
if not self.check_mode:
try:
self.client.remove_image(name, force=self.force)
except Exception as exc:
self.fail("Error removing image %s - %s" % (name, str(exc)))
self.results['changed'] = True
self.results['actions'].append("Removed image %s" % (name))
self.results['image']['state'] = 'Deleted'
def archive_image(self, name, tag):
'''
Archive an image to a .tar file. Called when archive_path is passed.
:param name - name of the image. Type: str
:return None
'''
if not tag:
tag = "latest"
image = self.client.find_image(name=name, tag=tag)
if not image:
self.log("archive image: image %s:%s not found" % (name, tag))
return
image_name = "%s:%s" % (name, tag)
self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
self.results['changed'] = True
if not self.check_mode:
self.log("Getting archive of image %s" % image_name)
try:
image = self.client.get_image(image_name)
except Exception as exc:
self.fail("Error getting image %s - %s" % (image_name, str(exc)))
try:
with open(self.archive_path, 'w') as fd:
for chunk in image.stream(2048, decode_content=False):
fd.write(chunk)
except Exception as exc:
self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
image = self.client.find_image(name=name, tag=tag)
if image:
self.results['image'] = image
def push_image(self, name, tag=None):
'''
If the name of the image contains a repository path, then push the image.
:param name Name of the image to push.
:param tag Use a specific tag.
:return: None
'''
repository = name
if not tag:
repository, tag = parse_repository_tag(name)
registry, repo_name = resolve_repository_name(repository)
self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
if registry:
self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
self.results['changed'] = True
if not self.check_mode:
status = None
try:
for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])
status = line.get('status')
except Exception as exc:
if re.search('unauthorized', str(exc)):
if re.search('authentication required', str(exc)):
self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
(registry, repo_name, tag, str(exc), registry))
else:
self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
(registry, repo_name, tag, str(exc)))
self.fail("Error pushing image %s: %s" % (repository, str(exc)))
self.results['image'] = self.client.find_image(name=repository, tag=tag)
if not self.results['image']:
self.results['image'] = dict()
self.results['image']['push_status'] = status
def tag_image(self, name, tag, repository, force=False, push=False):
'''
Tag an image into a repository.
:param name: name of the image. required.
:param tag: image tag.
:param repository: path to the repository. required.
:param force: bool. force tagging, even it image already exists with the repository path.
:param push: bool. push the image once it's tagged.
:return: None
'''
repo, repo_tag = parse_repository_tag(repository)
if not repo_tag:
repo_tag = "latest"
if tag:
repo_tag = tag
image = self.client.find_image(name=repo, tag=repo_tag)
found = 'found' if image else 'not found'
self.log("image %s was %s" % (repo, found))
if not image or force:
self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
self.results['changed'] = True
self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
if not self.check_mode:
try:
# Finding the image does not always work, especially running a localhost registry. In those
# cases, if we don't set force=True, it errors.
image_name = name
if tag and not re.search(tag, name):
image_name = "%s:%s" % (name, tag)
tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
if not tag_status:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image - %s" % str(exc))
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
if push:
self.push_image(repo, repo_tag)
def build_image(self):
'''
Build an image
:return: image dict
'''
params = dict(
path=self.path,
tag=self.name,
rm=self.rm,
nocache=self.nocache,
stream=True,
timeout=self.http_timeout,
pull=self.pull,
forcerm=self.rm,
dockerfile=self.dockerfile,
decode=True
)
build_output = []
if self.tag:
params['tag'] = "%s:%s" % (self.name, self.tag)
if self.container_limits:
params['container_limits'] = self.container_limits
if self.buildargs:
for key, value in self.buildargs.items():
self.buildargs[key] = to_native(value)
params['buildargs'] = self.buildargs
for line in self.client.build(**params):
# line = json.loads(line)
self.log(line, pretty_print=True)
if "stream" in line:
build_output.append(line["stream"])
if line.get('error'):
if line.get('errorDetail'):
errorDetail = line.get('errorDetail')
self.fail(
"Error building %s - code: %s, message: %s, logs: %s" % (
self.name,
errorDetail.get('code'),
errorDetail.get('message'),
build_output))
else:
self.fail("Error building %s - message: %s, logs: %s" % (
self.name, line.get('error'), build_output))
return self.client.find_image(name=self.name, tag=self.tag)
def load_image(self):
'''
Load an image from a .tar archive
:return: image dict
'''
try:
self.log("Opening image %s" % self.load_path)
image_tar = open(self.load_path, 'r')
except Exception as exc:
self.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
try:
self.log("Loading image from %s" % self.load_path)
self.client.load_image(image_tar)
except Exception as exc:
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
try:
image_tar.close()
except Exception as exc:
self.fail("Error closing image %s - %s" % (self.name, str(exc)))
return self.client.find_image(self.name, self.tag)
def log(self, msg, pretty_print=False):
return qb_log(msg)
def warn( self, warning ):
self.results['warnings'].append( str(warning) )
def qb_log( msg ):
if not qb.ipc.stdio.client.stdout.connected:
return False
string = None
if isinstance( msg, str ):
string = msg
elif isinstance( msg, dict ):
if 'stream' in msg:
string = msg['stream']
else:
string = json.dumps(
msg,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
if string is not None:
if not string.endswith( u"\n" ):
string = string + u"\n"
qb.ipc.stdio.client.stdout.socket.sendall(string)
return True
def qb_debug(name, message, **payload):
if not qb.ipc.stdio.client.log.connected:
return False
struct = dict(
level='debug',
name=name,
pid=os.getpid(),
thread=threading.current_thread().name,
message=message,
payload=payload,
)
string = json.dumps(struct)
if not string.endswith( u"\n" ):
string = string + u"\n"
qb.ipc.stdio.client.log.socket.sendall(string)
return True
def main():
argument_spec = dict(
archive_path=dict(type='path'),
container_limits=dict(type='dict'),
dockerfile=dict(type='str'),
force=dict(type='bool', default=False),
http_timeout=dict(type='int'),
load_path=dict(type='path'),
name=dict(type='str', required=True),
nocache=dict(type='bool', default=False),
path=dict(type='path', aliases=['build_path']),
pull=dict(type='bool', default=True),
push=dict(type='bool', default=False),
repository=dict(type='str'),
rm=dict(type='bool', default=True),
state=dict(type='str', choices=['absent', 'present', 'build'], default='present'),
tag=dict(type='str', default='latest'),
use_tls=dict(type='str', default='no', choices=['no', 'encrypt', 'verify']),
buildargs=dict(type='dict', default=None),
# QB additions
try_to_pull=dict( type='bool', default=True ),
)
client = QBAnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
)
results = dict(
changed=False,
actions=[],
image={},
warnings=[],
)
qb.ipc.stdio.client.connect(results['warnings'])
logger.info("HERERERERE", extra=dict(payload=dict(x='ex', y='why?')))
ImageManager(client, results)
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| nrser/qb | dev/scratch/docker/image/qb_docker_image.scratch.py | Python | mit | 26,930 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('farms', '0024_rain_and_irrigation_allow_null'),
]
operations = [
migrations.AlterField(
model_name='probereading',
name='irrigation',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Irrigation in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='probereading',
name='rain',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Rainfall in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterhistory',
name='irrigation',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Irrigation in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterhistory',
name='rain',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Rainfall in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterregister',
name='irrigation',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Irrigation in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterregister',
name='rain',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Rainfall in inches'),
preserve_default=True,
),
]
| warnes/irrigatorpro | irrigator_pro/farms/migrations/0025_default_rain_irrigation_to_null.py | Python | mit | 2,383 |
#!/usr/bin/python
import math
def trapezint(f, a, b, n) :
"""
    Just for testing - uses the trapezoidal approximation of f from a to b
    with n trapezoids
"""
output = 0.0
for i in range(int(n)):
f_output_lower = f( a + i * (b - a) / n )
f_output_upper = f( a + (i + 1) * (b - a) / n )
output += (f_output_lower + f_output_upper) * ((b-a)/n) / 2
return output
def second_derivative_approximation(f, x, h = .001):
"""
Approximates the second derivative with h (dx) = .001
"""
return (f(x + h) - 2 * f(x) + f(x - h))/h**2
def adaptive_trapezint(f, a, b, eps=1E-5):
"""
    Uses the trapezoidal approximation of f from a to b, choosing the number
    of trapezoids so that the error is less than epsilon
"""
max_second_derivative = 0
for i in range(10000):
i_second_d = abs(second_derivative_approximation(f, a + i * (b - a)/10000))
if( i_second_d > max_second_derivative):
max_second_derivative = i_second_d
h = math.sqrt(12 * eps / ((b - a) * max_second_derivative))
    # Known limitation: if the maximum second derivative is zero (e.g. f is
    # linear), the expression above divides by zero and no step size h can
    # be computed.
n = (b - a)/h
return trapezint(f, a, b, n)
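# --- Illustrative sketch (not part of the original file) ---
# A small sanity check, assuming only the functions above: integrate sin(x)
# over [0, pi], whose exact value is 2, and confirm both routines land well
# within a loose tolerance.
def _demo_sine_integral():
    exact = 2.0
    fixed = trapezint(math.sin, 0.0, math.pi, 1000)
    adaptive = adaptive_trapezint(math.sin, 0.0, math.pi, eps=1E-5)
    assert abs(fixed - exact) < 1e-3
    assert abs(adaptive - exact) < 1e-3
    return fixed, adaptive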
| chapman-phys227-2016s/hw-1-seama107 | adaptive_trapezint.py | Python | mit | 1,279 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 16:25:41 2016
@author: pavel
"""
from gi.repository import Gtk
import parameter_types as ptypes
from logger import Logger
logger = Logger.get_logger()
#
import gobject
gobject.threads_init()
#decorator is used to update gtk objects from another thread
def idle_add_decorator(func):
def callback(*args):
gobject.idle_add(func, *args)
return callback
class GTK_Wrapper(object):
def get_gui_object(self):
raise NotImplementedError()
@staticmethod
def get_wrapper(obj):
wrapper = TYPE_MATCHES.get(type(obj), GTK_ReadOnlyWrapper)
return wrapper(obj)
class GTK_ReadOnlyWrapper(GTK_Wrapper):
def __init__(self, obj):
self.label = Gtk.Label()
self.label.set_text(repr(obj))
def get_gui_object(self):
return self.label
class GTK_ParamWrapper(GTK_Wrapper):
def __init__(self, parameter):
self.parameter = parameter
self.container = Gtk.Box(spacing=2)
self.container.set_homogeneous(False)
self._set_name(parameter.get_name())
self.param_gui_obj = None
self._set_gui_obj()
self._append_gui_obj()
def get_gui_object(self):
return self.container
def _set_name(self, name):
name_label = Gtk.Label()
name_label.set_text(name)
self.container.pack_start(name_label, True, True, 0)
def _append_gui_obj(self):
if self.param_gui_obj is not None:
self.container.pack_start(self.param_gui_obj, True, True, 0)
self.param_gui_obj.set_hexpand(True)
#override this with obj and add call
# also set on update callback
def _set_gui_obj(self):
self.param_gui_obj = None
# override this with method
def _on_update(self, widget, parameter_obj):
logger.to_log(widget, parameter_obj)
class GTK_ParamCheckBtn(GTK_ParamWrapper):
def _set_gui_obj(self):
self.param_gui_obj = Gtk.CheckButton()
self.param_gui_obj.set_active(self.parameter.get_val())
self.param_gui_obj.connect("toggled", self._on_update, self.parameter)
def _on_update(self, widget, param):
new_val = widget.get_active()
param.set_val(new_val)
class GTK_ParamTextBox(GTK_ParamWrapper):
def _set_gui_obj(self):
self.param_gui_obj = Gtk.Entry()
self.param_gui_obj.set_text(str(self.parameter.get_val()))
self.param_gui_obj.connect("changed", self._on_update, self.parameter)
def _on_update(self, widget, param):
new_val = widget.get_text()
if not param.set_val(new_val):
#if impossible to set new value restore previous one
widget.set_text(str(param.get_val()))
logger.to_log(new_val, widget)
class GTK_ParamList(GTK_ParamWrapper):
def _set_gui_obj(self):
#value to select by default
active_val = self.parameter.get_val()
active_ind = 0
counter = 0
store = Gtk.ListStore(str)
for val in self.parameter.allowed_vals():
store.append([str(val)])
if val == active_val:
active_ind = counter
counter += 1
self.param_gui_obj = Gtk.ComboBox.new_with_model_and_entry(store)
self.param_gui_obj.set_entry_text_column(0)
self.param_gui_obj.set_active(active_ind)
self.param_gui_obj.connect("changed", self._on_update, self.parameter)
def _on_update(self, combobox, param):
model = combobox.get_model()
active = combobox.get_active()
if active is not None and active >= 0:
new_val = model[active][0]
param.set_val(new_val)
logger.to_log(new_val, combobox)
class GTK_ParamSlider(GTK_ParamWrapper):
from math import log10
NUM_STEPS = 100
ORIENTATION = Gtk.Orientation.HORIZONTAL
def _set_gui_obj(self):
#(initial value, min value, max value,
# step increment - press cursor keys to see!,
# page increment - click around the handle to see!,
init_val = self.parameter.get_val()
min_val, max_val = self.parameter.get_range()
step = float(max_val - min_val) / GTK_ParamSlider.NUM_STEPS
adj = Gtk.Adjustment(init_val, min_val, max_val, step, step, 0)
self.param_gui_obj = Gtk.Scale(orientation=GTK_ParamSlider.ORIENTATION,
adjustment = adj)
self.param_gui_obj.connect("value-changed", self._on_update, self.parameter)
self.param_gui_obj.set_digits(self._num_digits(step))
def _on_update(self, widget, param):
new_val = widget.get_value()
param.set_val(new_val)
logger.to_log(new_val, widget)
#print dir(self)
#print dir(super(GTK_ParamSlider, self))
#print dir(param)
#new_val = self.adj.get_value()
#print new_val
def _num_digits(self, step):
#return the number of decimal places to display based on step
remainder = abs(step - round(step))
remainder_log = - GTK_ParamSlider.log10(remainder)
return max(1, int(remainder_log))
TYPE_MATCHES = {ptypes.Parameter:GTK_ParamTextBox,
ptypes.BoolParameter:GTK_ParamCheckBtn,
ptypes.ListParameter:GTK_ParamList,
ptypes.RangeParameter:GTK_ParamSlider,
ptypes.RangeParameterFit:GTK_ParamSlider} | i026e/python_ecg_graph | gtk_wrapper.py | Python | mit | 5,750 |
lookup = {}
lookup = dict()
lookup = {'age': 42, 'loc': 'Italy'}
lookup = dict(age=42, loc='Italy')
print(lookup)
print(lookup['loc'])
lookup['cat'] = 'cat'
if 'cat' in lookup:
print(lookup['cat'])
class Wizard:
# This actually creates a key value dictionary
def __init__(self, name, level):
self.level = level
self.name = name
# There is an implicit dictionary that stores this data
gandolf = Wizard('Gladolf', 42)
print(gandolf.__dict__)
# The takeaway is that all objects are built around the concept of dictionary data structures
# Here is another example
import collections
User = collections.namedtuple('User', 'id, name, email')
users = [
User(1, 'user1', '[email protected]'),
User(2, 'user2', '[email protected]'),
User(3, 'user3', '[email protected]'),
]
lookup = dict()
for u in users:
lookup[u.email] = u
print(lookup['[email protected]']) | derrickyoo/python-jumpstart | apps/09_real_estate_data_miner/concept_dicts.py | Python | mit | 933 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('course_selection', '0018_auto_20150830_0319'),
]
operations = [
migrations.AlterUniqueTogether(
name='friend_relationship',
unique_together=None,
),
migrations.RemoveField(
model_name='friend_relationship',
name='from_user',
),
migrations.RemoveField(
model_name='friend_relationship',
name='to_user',
),
migrations.RemoveField(
model_name='nice_user',
name='friends',
),
migrations.DeleteModel(
name='Friend_Relationship',
),
]
| maximz/recal | course_selection/migrations/0019_auto_20150903_0458.py | Python | mit | 809 |
from django.conf.urls import url
from .views import (
semseterResultxlsx,
)
urlpatterns=[
url(r'^semester-xlsx/(?P<collegeCode>\d+)/(?P<branchCode>\d+)/(?P<yearOfJoining>\d+)/(?P<semester>\d+)/$',semseterResultxlsx,name='semseterResultxlsx')
]
| rpsingh21/resultanalysis | resultAnalysis/xlsx/urls.py | Python | mit | 249 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# run as:
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py
# or
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py -A gis
#
#
# Built with code/inspiration from MapFish, OpenLayers & Michael Crute
#
try:
theme = settings.get_theme()
except:
print "ERROR: File now needs to be run in the web2py environment in order to pick up which theme to build"
exit()
import os
import sys
import shutil
SCRIPTPATH = os.path.join(request.folder, "static", "scripts", "tools")
os.chdir(SCRIPTPATH)
sys.path.append("./")
# For JS
import getopt
import jsmin, mergejs
# For CSS
import re
def mergeCSS(inputFilenames, outputFilename):
output = ""
for inputFilename in inputFilenames:
output += open(inputFilename, "r").read()
open(outputFilename, "w").write(output)
return outputFilename
def cleanline(theLine):
""" Kills line breaks, tabs, and double spaces """
p = re.compile("(\n|\r|\t|\f|\v)+")
m = p.sub("", theLine)
# Kills double spaces
p = re.compile("( )+")
m = p.sub(" ", m)
# Removes last semicolon before }
p = re.compile("(; }|;})+")
m = p.sub("}", m)
# Removes space before {
p = re.compile("({ )+")
m = p.sub("{", m)
# Removes all comments
p = re.compile("/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/")
m = p.sub("", m)
# Strip off the Charset
p = re.compile("@CHARSET .*;")
m = p.sub("", m)
# Strip spaces before the {
p = re.compile(" {")
m = p.sub("{", m)
# Strip space after :
p = re.compile(": ")
m = p.sub(":", m)
# Strip space after ,
p = re.compile(", ")
m = p.sub(",", m)
# Strip space after ;
p = re.compile("; ")
m = p.sub(";", m)
return m
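# --- Illustrative sketch (not part of the original script) ---
# A quick check of what cleanline() does to one small rule, assuming nothing
# beyond the function above: the comment, newlines and redundant spaces are
# stripped and "prop: value" becomes "prop:value".
def _cleanline_demo():
    raw = "/* header */\ndiv {\n    color: red ;\n}\n"
    return cleanline(raw)  # roughly "div{color:red }"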
def compressCSS(inputFilename, outputFilename):
theFile = open(inputFilename, "r").read()
output = ""
    # Note: theFile is a single string, so this loop visits one character at
    # a time; the real minification happens in the final cleanline() pass below.
    for line in theFile:
        output = output + cleanline(line)
# Once more, clean the entire file string
_output = cleanline(output)
open(outputFilename, "w").write(_output)
return
def dojs(dogis = False, warnings = True):
""" Minifies the JavaScript """
# Do we have local version of the Closure Compiler available?
use_compressor = "jsmin" # Fallback
try:
import closure
use_compressor = "closure"
print "using local Closure Compiler"
except Exception, E:
print "No closure (%s)" % E
print "Download from http://closure-compiler.googlecode.com/files/compiler-latest.zip"
try:
import closure_ws
use_compressor = "closure_ws"
print "Using Closure via Web Service - limited to files < 1Mb!"
except ImportError:
print "No closure_ws"
if use_compressor == "closure":
if not warnings:
closure.extra_params = "--warning_level QUIET"
minimize = closure.minimize
elif use_compressor == "closure_ws":
minimize = closure_ws.minimize
elif use_compressor == "jsmin":
minimize = jsmin.jsmin
sourceDirectory = ".."
configFilename = "sahana.js.cfg"
outputFilename = "S3.min.js"
# Merge JS files
print "Merging Core libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
# Compress JS files
print "Compressing - JS"
minimized = minimize(merged)
# Add license
print "Adding license file."
minimized = open("license.txt").read() + minimized
# Print to output files
print "Writing to %s." % outputFilename
open(outputFilename, "w").write(minimized)
# Remove old JS files
print "Deleting %s." % outputFilename
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
# Move new JS files
print "Moving new JS files"
shutil.move(outputFilename, "../S3")
# dataTables
print "Compressing dataTables"
sourceDirectorydataTables = ".."
configFilenamedataTables = "sahana.js.dataTables.cfg"
outputFilenamedataTables = "s3.dataTables.min.js"
mergeddataTables = mergejs.run(sourceDirectorydataTables,
None,
configFilenamedataTables)
minimizeddataTables = minimize(mergeddataTables)
open(outputFilenamedataTables, "w").write(minimizeddataTables)
try:
os.remove("../S3/%s" % outputFilenamedataTables)
except:
pass
shutil.move(outputFilenamedataTables, "../S3")
# Vulnerability
print "Compressing Vulnerability"
sourceDirectoryVulnerability = ".."
configFilenameVulnerability = "sahana.js.vulnerability.cfg"
outputFilenameVulnerability = "s3.vulnerability.min.js"
mergedVulnerability = mergejs.run(sourceDirectoryVulnerability,
None,
configFilenameVulnerability)
minimizedVulnerability = minimize(mergedVulnerability)
open(outputFilenameVulnerability, "w").write(minimizedVulnerability)
try:
os.remove("../S3/%s" % outputFilenameVulnerability)
except:
pass
shutil.move(outputFilenameVulnerability, "../S3")
print "Compressing Vulnerability GIS"
sourceDirectoryVulnerability = "../../themes/Vulnerability/js"
configFilenameVulnerability = "sahana.js.vulnerability_gis.cfg"
outputFilenameVulnerability = "OpenLayers.js"
mergedVulnerability = mergejs.run(sourceDirectoryVulnerability,
None,
configFilenameVulnerability)
minimizedVulnerability = minimize(mergedVulnerability)
open(outputFilenameVulnerability, "w").write(minimizedVulnerability)
try:
os.remove("../../themes/Vulnerability/js/%s" % outputFilenameVulnerability)
except:
pass
shutil.move(outputFilenameVulnerability, "../../themes/Vulnerability/js")
# Single scripts
for filename in [
"contacts",
"embed_component",
"inline_component",
"locationselector.widget",
"popup",
"report",
"select_person",
"timeline",
]:
print "Compressing s3.%s.js" % filename
inputFilename = os.path.join("..", "S3", "s3.%s.js" % filename)
outputFilename = "s3.%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
if dogis:
sourceDirectoryGIS = "../S3"
sourceDirectoryOpenLayers = "../gis/openlayers/lib"
sourceDirectoryOpenLayersExten = "../gis"
sourceDirectoryMGRS = "../gis"
sourceDirectoryGeoExt = "../gis/GeoExt/lib"
sourceDirectoryGeoExtux = "../gis/GeoExt/ux"
sourceDirectoryGxp = "../gis/gxp"
#sourceDirectoryGeoExplorer = "../gis/GeoExplorer"
configFilenameGIS = "sahana.js.gis.cfg"
configFilenameOpenLayers = "sahana.js.ol.cfg"
configFilenameOpenLayersExten = "sahana.js.ol_exten.cfg"
configFilenameMGRS = "sahana.js.mgrs.cfg"
configFilenameGeoExt = "sahana.js.geoext.cfg"
configFilenameGeoExtux = "sahana.js.geoextux.cfg"
configFilenameGxpMin = "sahana.js.gxp.cfg"
configFilenameGxpFull = "sahana.js.gxpfull.cfg"
#configFilenameGeoExplorer = "sahana.js.geoexplorer.cfg"
outputFilenameGIS = "s3.gis.min.js"
outputFilenameOpenLayers = "OpenLayers.js"
outputFilenameMGRS = "MGRS.min.js"
outputFilenameGeoExt = "GeoExt.js"
outputFilenameGxp = "gxp.js"
#outputFilenameGeoExplorer = "GeoExplorer.js"
# Merge GIS JS Files
print "Merging GIS scripts."
mergedGIS = mergejs.run(sourceDirectoryGIS,
None,
configFilenameGIS)
print "Merging OpenLayers libraries."
mergedOpenLayers = mergejs.run(sourceDirectoryOpenLayers,
None,
configFilenameOpenLayers)
mergedOpenLayersExten = mergejs.run(sourceDirectoryOpenLayersExten,
None,
configFilenameOpenLayersExten)
print "Merging MGRS libraries."
mergedMGRS = mergejs.run(sourceDirectoryMGRS,
None,
configFilenameMGRS)
print "Merging GeoExt libraries."
mergedGeoExt = mergejs.run(sourceDirectoryGeoExt,
None,
configFilenameGeoExt)
mergedGeoExtux = mergejs.run(sourceDirectoryGeoExtux,
None,
configFilenameGeoExtux)
print "Merging gxp libraries."
mergedGxpMin = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpMin)
mergedGxpFull = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpFull)
#print "Merging GeoExplorer libraries."
#mergedGeoExplorer = mergejs.run(sourceDirectoryGeoExplorer,
# None,
# configFilenameGeoExplorer)
# Compress JS files
print "Compressing - GIS JS"
minimizedGIS = minimize(mergedGIS)
print "Compressing - OpenLayers JS"
if use_compressor == "closure_ws":
# Limited to files < 1Mb!
minimizedOpenLayers = jsmin.jsmin("%s\n%s" % (mergedOpenLayers,
mergedOpenLayersExten))
else:
minimizedOpenLayers = minimize("%s\n%s" % (mergedOpenLayers,
mergedOpenLayersExten))
print "Compressing - MGRS JS"
minimizedMGRS = minimize(mergedMGRS)
print "Compressing - GeoExt JS"
minimizedGeoExt = minimize("%s\n%s\n%s" % (mergedGeoExt,
mergedGeoExtux,
mergedGxpMin))
print "Compressing - gxp JS"
minimizedGxp = minimize(mergedGxpFull)
#print "Compressing - GeoExplorer JS"
#minimizedGeoExplorer = minimize(mergedGeoExplorer)
# Add license
#minimizedGIS = open("license.gis.txt").read() + minimizedGIS
# Print to output files
print "Writing to %s." % outputFilenameGIS
open(outputFilenameGIS, "w").write(minimizedGIS)
print "Writing to %s." % outputFilenameOpenLayers
open(outputFilenameOpenLayers, "w").write(minimizedOpenLayers)
print "Writing to %s." % outputFilenameMGRS
open(outputFilenameMGRS, "w").write(minimizedMGRS)
print "Writing to %s." % outputFilenameGeoExt
open(outputFilenameGeoExt, "w").write(minimizedGeoExt)
print "Writing to %s." % outputFilenameGxp
open(outputFilenameGxp, "w").write(minimizedGxp)
#print "Writing to %s." % outputFilenameGeoExplorer
#open(outputFilenameGeoExplorer, "w").write(minimizedGeoExplorer)
# Move new JS files
print "Deleting %s." % outputFilenameGIS
try:
os.remove("../S3/%s" % outputFilenameGIS)
except:
pass
print "Moving new GIS JS files"
shutil.move(outputFilenameGIS, "../S3")
print "Deleting %s." % outputFilenameOpenLayers
try:
os.remove("../gis/%s" % outputFilenameOpenLayers)
except:
pass
print "Moving new OpenLayers JS files"
shutil.move(outputFilenameOpenLayers, "../gis")
print "Deleting %s." % outputFilenameMGRS
try:
os.remove("../gis/%s" % outputFilenameMGRS)
except:
pass
print "Moving new MGRS JS files"
shutil.move(outputFilenameMGRS, "../gis")
print "Deleting %s." % outputFilenameGeoExt
try:
os.remove("../gis/%s" % outputFilenameGeoExt)
except:
pass
print "Moving new GeoExt JS files"
shutil.move(outputFilenameGeoExt, "../gis")
print "Deleting %s." % outputFilenameGxp
try:
os.remove("../gis/%s" % outputFilenameGxp)
except:
pass
print "Moving new gxp JS files"
shutil.move(outputFilenameGxp, "../gis")
#print "Deleting %s." % outputFilenameGeoExplorer
#try:
# os.remove("../gis/%s" % outputFilenameGeoExplorer)
#except:
# pass
#print "Moving new GeoExplorer JS files"
#shutil.move(outputFilenameGeoExplorer, "../gis")
def docss():
""" Compresses the CSS files """
listCSS = []
theme = settings.get_theme()
print "Using theme %s" % theme
css_cfg = os.path.join("..", "..", "..", "private", "templates", theme, "css.cfg")
f = open(css_cfg, "r")
files = f.readlines()
f.close()
for file in files[:-1]:
p = re.compile("(\n|\r|\t|\f|\v)+")
file = p.sub("", file)
listCSS.append("../../styles/%s" % file)
outputFilenameCSS = "eden.min.css"
# Merge CSS files
print "Merging Core styles."
mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
# Compress CSS files
print "Writing to %s." % outputFilenameCSS
compressCSS(mergedCSS, outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../../themes/%s/%s" % (theme, outputFilenameCSS))
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../../themes/%s" % theme)
def main(argv):
try:
parameter1 = argv[0]
except:
parameter1 = "ALL"
try:
if(argv[1] == "DOGIS"):
parameter2 = True
else:
parameter2 = False
except:
parameter2 = True
closure_warnings = True
if "NOWARN" in argv:
closure_warnings = False
if parameter1 in ("ALL", "NOWARN"):
dojs(warnings=closure_warnings)
docss()
else:
if parameter1 == "CSS":
docss()
else:
dojs(parameter2, warnings=closure_warnings)
docss()
print "Done."
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| smeissner/eden | static/scripts/tools/build.sahana.py | Python | mit | 14,872 |
"""
Consider this game: Write 8 blanks on a sheet of paper. Randomly pick a digit 0-9. After seeing the digit, choose one
of the 8 blanks to place that digit in. Randomly choose another digit (with replacement) and then choose one of the 7
remaining blanks to place it in. Repeat until you've filled all 8 blanks. You win if the 8 digits written down are in
order from smallest to largest.
Write a program that plays this game by itself and determines whether it won or not. Run it 1 million times and post
your probability of winning.
Assigning digits to blanks randomly lets you win about 0.02% of the time. Here's a python script that wins about 10.3%
of the time. Can you do better?
import random
def trial():
indices = range(8) # remaining unassigned indices
s = [None] * 8 # the digits in their assigned places
while indices:
d = random.randint(0,9) # choose a random digit
index = indices[int(d*len(indices)/10)] # assign it an index
s[index] = str(d)
indices.remove(index)
return s == sorted(s)
print sum(trial() for _ in range(1000000))
thanks to cosmologicon for the challenge at /r/dailyprogrammer_ideas ..
link [http://www.reddit.com/r/dailyprogrammer_ideas/comments/s30be/intermediate_digitassigning_game/]
"""
import random
import itertools
def que_sort(data):
# print(data)
return all(b >= a for a, b in zip(data, itertools.islice(data, 1, None)))
TRIALS = 1
win = 0
for a in range(TRIALS):
l = [None] * 8
p = list(range(8))
while p:
d = random.randint(0,9)
# i = random.choice(p)
i = int(d * (len(p)) / 10)
print(p[i])
l[p[i]] = d
p.pop(i)
print(l)
if que_sort(l):
win += 1
print('{}/{} - {}%'.format(win, TRIALS, win/TRIALS*100))
 | DayGitH/Python-Challenges | DailyProgrammer/20120430B.py | Python | mit | 1,804 |
definition = {
"where": "?subj a foaf:Organization .",
"fields": {
"name": {
"where": "?subj rdfs:label ?obj ."
}
}
}
 | gwu-libraries/vivo2notld | vivo2notld/definitions/organization_summary.py | Python | mit | 157 |
#!/usr/bin/python
import sys
import re
re_valid_email = re.compile(r'^[-_0-9a-zA-Z]+@[0-9a-zA-Z]+\.[0-9a-zA-Z]{1,3}$')
def valid_email(s):
return not (re_valid_email.search(s) == None)
N = int(raw_input().strip())
A = []
for i in range(N):
A += [ str(raw_input().strip()) ]
A.sort()
V = filter(valid_email, A)
print V
#### INPUT ##
## 3
## [email protected]
## [email protected]
## [email protected]
##
#### OUTPUT ##
## ['[email protected]', '[email protected]', '[email protected]']
#### INPUT ##
## 5
## [email protected]
## itsallcrap
## [email protected]
## [email protected]
## matt23@@india.in
##
#### OUTPUT ##
## ['[email protected]', '[email protected]', '[email protected]']
| nabin-info/hackerrank.com | validate-list-of-email-address-with-filter.py | Python | mit | 747 |
import argparse
from nltk.corpus import brown
import requests
import arrow
import json
parser = argparse.ArgumentParser()
parser.add_argument('host')
args = parser.parse_args()
def create_new_novel():
url = 'http://{host}/api/novel'.format(host=args.host)
response = requests.post(url, json={'title': 'Test Novel {}'.format(arrow.utcnow())})
return json.loads(response.text)['id']
def create_new_chapter(novel_id):
url = 'http://{host}/api/chapter'.format(host=args.host)
chapter_title = 'Chapter {}'.format(arrow.utcnow())
response = requests.post(url, json={'title': chapter_title, 'novel_id': novel_id})
return json.loads(response.text)['id']
def post_example_text_to_chapter(chapter_id, host):
url = 'http://{host}/api/novel_token'.format(host=host)
words = brown.words(categories=['news'])
for ordinal, word in enumerate(words):
if ordinal > 1000:
break
requests.post(url, json={'token': word.lower(), 'ordinal': ordinal, 'chapter_id': chapter_id})
if __name__ == '__main__':
novel_id = create_new_novel()
chapter_id = create_new_chapter(novel_id)
    post_example_text_to_chapter(chapter_id, args.host)
 | thebritican/anovelmous | anovelmous/tests/post_example_novel.py | Python | mit | 1,193 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import models
from djanban.apps.hourly_rates.models import HourlyRate
from django import forms
# Hourly rate creation and edition form
class HourlyRateForm(models.ModelForm):
class Meta:
model = HourlyRate
fields = ["name", "start_date", "end_date", "amount", "is_active"]
widgets = {
'start_date': forms.SelectDateWidget(),
'end_date': forms.SelectDateWidget(empty_label=u"Until now"),
}
def __init__(self, *args, **kwargs):
super(HourlyRateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(HourlyRateForm, self).clean()
if cleaned_data.get("end_date") and cleaned_data.get("start_date") > cleaned_data.get("end_date"):
raise ValidationError(u"Start date can't be greater that end date")
return cleaned_data
class DeleteHourlyRateForm(forms.Form):
confirmed = forms.BooleanField(label=u"Please confirm you really want to do this action", required=True)
| diegojromerolopez/djanban | src/djanban/apps/hourly_rates/forms.py | Python | mit | 1,137 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The daemon that calls auto_copy.py upon optical disc insertion
"""
import signal
import sys
import time
sys.path.append('/usr/local/bin')
import auto_copy
SIGNAL_RECEIVED = False
def run_daemon(config):
"""
    Run the daemon
config: configParser object
"""
signal.signal(signal.SIGUSR1, signal_handler)
while True:
time.sleep(1)
global SIGNAL_RECEIVED
if SIGNAL_RECEIVED:
auto_copy.auto_copy(config)
SIGNAL_RECEIVED = False
def signal_handler(dump1, dump2):
global SIGNAL_RECEIVED
SIGNAL_RECEIVED = True
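# The daemon idles until it receives SIGUSR1; a udev rule or a shell can trigger
# one copy run with e.g. `kill -USR1 <pid-of-this-daemon>` (illustrative command,
# the exact trigger mechanism is site-specific).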
if __name__ == "__main__":
main_config = auto_copy.read_config('/etc/auto_copy.yml')
auto_copy.setup_logging(main_config)
run_daemon(main_config)
| shoubamzlibap/small_projects | auto_copy/auto_copy_daemon.py | Python | mit | 798 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
from monty.json import MontyDecoder
from pymatgen.apps.battery.conversion_battery import ConversionElectrode
from pymatgen.apps.battery.insertion_battery import InsertionElectrode
from pymatgen.apps.battery.plotter import VoltageProfilePlotter
from pymatgen.core.composition import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.util.testing import PymatgenTest
class VoltageProfilePlotterTest(unittest.TestCase):
def setUp(self):
entry_Li = ComputedEntry("Li", -1.90753119)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiTiO2_batt.json")) as f:
entries_LTO = json.load(f, cls=MontyDecoder)
self.ie_LTO = InsertionElectrode.from_entries(entries_LTO, entry_Li)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "FeF3_batt.json")) as fid:
entries = json.load(fid, cls=MontyDecoder)
self.ce_FF = ConversionElectrode.from_composition_and_entries(Composition("FeF3"), entries)
def testName(self):
plotter = VoltageProfilePlotter(xaxis="frac_x")
plotter.add_electrode(self.ie_LTO, "LTO insertion")
plotter.add_electrode(self.ce_FF, "FeF3 conversion")
self.assertIsNotNone(plotter.get_plot_data(self.ie_LTO))
self.assertIsNotNone(plotter.get_plot_data(self.ce_FF))
def testPlotly(self):
plotter = VoltageProfilePlotter(xaxis="frac_x")
plotter.add_electrode(self.ie_LTO, "LTO insertion")
plotter.add_electrode(self.ce_FF, "FeF3 conversion")
fig = plotter.get_plotly_figure()
self.assertEqual(fig.layout.xaxis.title.text, "Atomic Fraction of Li")
plotter = VoltageProfilePlotter(xaxis="x_form")
plotter.add_electrode(self.ce_FF, "FeF3 conversion")
fig = plotter.get_plotly_figure()
self.assertEqual(fig.layout.xaxis.title.text, "x in Li<sub>x</sub>FeF3")
plotter.add_electrode(self.ie_LTO, "LTO insertion")
fig = plotter.get_plotly_figure()
self.assertEqual(fig.layout.xaxis.title.text, "x Workion Ion per Host F.U.")
if __name__ == "__main__":
unittest.main()
| materialsproject/pymatgen | pymatgen/apps/battery/tests/test_plotter.py | Python | mit | 2,269 |
from abc import ABCMeta, abstractmethod
class AbstractAuthenticator(metaclass=ABCMeta):
def __init__(self):
"""
Every authenticator has to have a name
:param name:
"""
super().__init__()
@abstractmethod
def authorise_transaction(self, customer):
"""
Decide whether to authorise transaction.
Note that all relevant information can be obtained from the customer.
:param customer: the customer making a transaction
:return: boolean, whether or not to authorise the transaction
"""
| lmzintgraf/MultiMAuS | authenticators/abstract_authenticator.py | Python | mit | 596 |
from django.conf.urls import url, include
urlpatterns = [
url(r'^postcode-lookup/', include('django_postcode_lookup.urls')),
]
| LabD/django-postcode-lookup | sandbox/urls.py | Python | mit | 132 |
import sys
[_, ms, _, ns] = list(sys.stdin)
ms = set(int(m) for m in ms.split(' '))
ns = set(int(n) for n in ns.split(' '))
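# Illustrative stdin for the lines above: the first and third lines (the set
# sizes) are discarded by the unpacking, the second and fourth hold the elements.
#   4
#   2 4 5 9
#   4
#   2 4 11 12
# The print below then emits 5, 9, 11 and 12, one value per line.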
print(sep='\n', *sorted(ms.difference(ns).union(ns.difference(ms))))
| alexander-matsievsky/HackerRank | All_Domains/Python/Sets/symmetric-difference.py | Python | mit | 194 |
import os
import numpy as np
class Dataset(object):
"""
This class represents a dataset and consists of a list of SongData along with some metadata about the dataset
"""
def __init__(self, songs_data=None):
if songs_data is None:
self.songs_data = []
else:
self.songs_data = songs_data
def add_song(self, song_data):
self.songs_data.append(song_data)
def songs(self):
for s in self.songs_data:
yield s
@property
def num_features(self):
if len(self.songs_data):
return self.songs_data[0].X.shape[1]
@property
def size(self):
return len(self.songs_data)
def __repr__(self):
return ', '.join([s.name for s in self.songs()])
class SongData(object):
"""
This class holds features, labels, and metadata for a song.
"""
def __init__(self, audio_path, label_path):
if not os.path.isfile(audio_path):
raise IOError("Audio file at %s does not exist" % audio_path)
if label_path and not os.path.isfile(label_path):
raise IOError("MIDI file at %s does not exist" % label_path)
self.audio_path = audio_path
self.label_path = label_path
"""
x [num_samples,] is the samples of the song
"""
@property
def x(self):
return self.__x
@x.setter
def x(self, x):
self.__x = x
"""
X [num_frames x num_features] is the feature matrix for the song
"""
@property
def X(self):
return self.__X
@X.setter
def X(self, X):
if hasattr(self, 'Y') and self.Y.shape[0] != X.shape[0]:
raise ValueError("Number of feature frames must equal number of label frames")
self.__X = X
"""
Y [num_frames x num_pitches] is the label matrix for the song
"""
@property
def Y(self):
return self.__Y
@Y.setter
def Y(self, Y):
if hasattr(self, 'X') and self.X.shape[0] != Y.shape[0]:
raise ValueError("Number of label frames must equal number of feature frames")
self.__Y = Y
@property
def num_pitches(self):
if hasattr(self, 'Y'):
return np.shape(self.Y)[1]
return 0
@property
def num_features(self):
if hasattr(self, 'X'):
return self.X.shape[1]
@property
def num_frames(self):
if hasattr(self, 'X'):
return self.X.shape[0]
@property
def name(self):
return os.path.splitext(os.path.split(self.audio_path)[-1])[0]
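# Illustrative usage sketch (the paths are hypothetical and must point at real
# files, since the constructor checks that they exist):
#
#     song = SongData("audio/song01.wav", "labels/song01.mid")
#     song.X = features      # [num_frames x num_features] feature matrix
#     song.Y = labels        # [num_frames x num_pitches], same frame count as X
#     dataset = Dataset([song])
#     print(dataset.size, dataset.num_features)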
| Guitar-Machine-Learning-Group/guitar-transcriber | dataset.py | Python | mit | 2,586 |
import gevent
import time
def doit(i):
print "do it:%s" % (i)
gevent.sleep(2)
print "done:%s" %(i)
t2 = time.time()
threads = {}
for i in range(5):
t = gevent.spawn(doit, i)
threads[i] = t
#print dir(t)
gevent.sleep(1)
print threads
print threads[3].dead
threads[3].kill()
print threads[3].dead
del threads[3]
threads[2].kill()
print threads
#gevent.sleep(3)
print time.time() - t2
for i in threads:
print threads[ i ].dead
#print t
gevent.sleep(3)
print time.time() - t2
for i in threads:
print threads[ i ].dead
| mabotech/maboss.py | maboss/motorx/scheduler/test01.py | Python | mit | 577 |
import sys
import os
import time
import numpy
import cv2
import cv2.cv as cv
from PIL import Image
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
from picture.util import define
from picture.util.system import POINT
from picture.util.log import LOG as L
THRESHOLD = 0.96
class PatternMatch(object):
def __init__(self):
pass
@classmethod
def __patternmatch(self, reference, target):
L.info("reference : %s" % reference)
img_rgb = cv2.imread(reference)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(target, 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
loc = numpy.where( res >= THRESHOLD)
result = None
for pt in zip(*loc[::-1]):
result = POINT(pt[0], pt[1], w, h)
return result
@classmethod
def bool(self, reference, target):
result = PatternMatch.__patternmatch(reference, target)
if result is None:
return False
else:
return True
@classmethod
def coordinate(self, reference, target):
return PatternMatch.__patternmatch(reference, target)
if __name__ == "__main__":
pmc = PatternMatch()
print pmc.bool(os.path.join(define.APP_TMP,"screen.png"),
os.path.join(define.APP_TMP,"login.png"))
| setsulla/owanimo | lib/picture/bin/patternmatch.py | Python | mit | 1,439 |
import sys
def output_gpx(points, output_filename):
"""
Output a GPX file with latitude and longitude from the points DataFrame.
"""
from xml.dom.minidom import getDOMImplementation
def append_trkpt(pt, trkseg, doc):
trkpt = doc.createElement('trkpt')
trkpt.setAttribute('lat', '%.8f' % (pt['lat']))
trkpt.setAttribute('lon', '%.8f' % (pt['lon']))
trkseg.appendChild(trkpt)
doc = getDOMImplementation().createDocument(None, 'gpx', None)
trk = doc.createElement('trk')
doc.documentElement.appendChild(trk)
trkseg = doc.createElement('trkseg')
trk.appendChild(trkseg)
points.apply(append_trkpt, axis=1, trkseg=trkseg, doc=doc)
with open(output_filename, 'w') as fh:
doc.writexml(fh, indent=' ')
def main():
points = get_data(sys.argv[1])
print('Unfiltered distance: %0.2f' % (distance(points),))
smoothed_points = smooth(points)
print('Filtered distance: %0.2f' % (distance(smoothed_points),))
output_gpx(smoothed_points, 'out.gpx')
if __name__ == '__main__':
    main()
 | MockyJoke/numbers | ex3/code/calc_distance_hint.py | Python | mit | 1,089 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import urllib
import time
import datetime
#From PatMap by Jason Young, available on GitHub at github.com/JasYoung314/PatMap
#Function to get distance between 2 points from google maps. By default route is by car, distance is given in miles and time in minutes.
def CalculateDistance(Origin = False,Destination = False, Method = "driving",TimeUnits = "Minutes",DistUnits = "Miles"):
    #this is the start of a distance matrix url
base = "https://maps.googleapis.com/maps/api/distancematrix/json?"
#Converts the variables to the required format
urlorigin = "origins=%s&".encode('utf-8') %(Origin)
urldestination = "destinations=%s&".encode('utf-8') %(Destination)
urlmethod = "mode=%s&" %(Method)
if DistUnits == "Kilometers" or DistUnits == "Meters":
urlunits = "units=metric&"
else:
urlunits = "units=imperial&"
#constructs the completed url
url = base.decode('utf-8') + urlorigin.decode('utf-8') + urldestination.decode('utf-8') + urlmethod.decode('utf-8') + urlunits.decode('utf-8') + "language=en-EN&sensor=false".decode('utf-8')
    #Interprets the json data received
try:
result= json.load(urllib.urlopen(url))
except:
return 'ERROR','ERROR'
#Reads the status code and takes the appropriate action
if result["status"] == "OK":
if result["rows"][0]["elements"][0]["status"] == "OK":
time = result["rows"][0]["elements"][0]["duration"]["value"]
distance = result["rows"][0]["elements"][0]["distance"]["value"]
if TimeUnits == "Minutes":
time = time/60.0
elif TimeUnits == "Hours":
time = time/3600.0
if DistUnits == "Kilometres":
distance = distance/1000.0
elif DistUnits == "Yards":
distance = distance*1.0936133
elif DistUnits == "Miles":
distance = distance*0.000621371192
return time,distance
else:
return result["rows"][0]["elements"][0]["status"],result["rows"][0]["elements"][0]["status"]
else:
return result["status"]
| MatthewGWilliams/Staff-Transport | emergencyTransport/RouteFinder/GoogleDistances.py | Python | mit | 1,984 |
from datetime import date
NTESTS = 1
PREV_DAYS = 10
PERCENT_UP = 0.01
PERCENT_DOWN = 0.01
PERIOD = 'Hourly' # [5-min, 15-min, 30-min, Hourly, 2-hour, 6-hour, 12-hour, Daily, Weekly]
MARKET = 'bitstampUSD'
# DATE START
YEAR_START = 2011
MONTH_START = 9
DAY_START = 13
DATE_START = date(YEAR_START, MONTH_START, DAY_START)
# DATE END
DATE_END = date.today()
URL_DATA_BASE = 'http://bitcoincharts.com/charts/chart.json?'
| bukosabino/btctrading | settings.py | Python | mit | 422 |
from rest_framework import test, status
from waldur_core.structure.models import CustomerRole, ProjectRole
from waldur_core.structure.tests import factories as structure_factories
from . import factories
class ServiceProjectLinkPermissionTest(test.APITransactionTestCase):
def setUp(self):
self.users = {
'owner': structure_factories.UserFactory(),
'admin': structure_factories.UserFactory(),
'manager': structure_factories.UserFactory(),
'no_role': structure_factories.UserFactory(),
'not_connected': structure_factories.UserFactory(),
}
# a single customer
self.customer = structure_factories.CustomerFactory()
self.customer.add_user(self.users['owner'], CustomerRole.OWNER)
# that has 3 users connected: admin, manager
self.connected_project = structure_factories.ProjectFactory(customer=self.customer)
self.connected_project.add_user(self.users['admin'], ProjectRole.ADMINISTRATOR)
self.connected_project.add_user(self.users['manager'], ProjectRole.MANAGER)
# has defined a service and connected service to a project
self.service = factories.OpenStackServiceFactory(customer=self.customer)
self.service_project_link = factories.OpenStackServiceProjectLinkFactory(
project=self.connected_project,
service=self.service)
# the customer also has another project with users but without a permission link
self.not_connected_project = structure_factories.ProjectFactory(customer=self.customer)
self.not_connected_project.add_user(self.users['not_connected'], ProjectRole.ADMINISTRATOR)
self.not_connected_project.save()
self.url = factories.OpenStackServiceProjectLinkFactory.get_list_url()
def test_anonymous_user_cannot_grant_service_to_project(self):
response = self.client.post(self.url, self._get_valid_payload())
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_can_connect_service_and_project_he_owns(self):
user = self.users['owner']
self.client.force_authenticate(user=user)
service = factories.OpenStackServiceFactory(customer=self.customer)
project = structure_factories.ProjectFactory(customer=self.customer)
payload = self._get_valid_payload(service, project)
response = self.client.post(self.url, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_admin_cannot_connect_new_service_and_project_if_he_is_project_admin(self):
user = self.users['admin']
self.client.force_authenticate(user=user)
service = factories.OpenStackServiceFactory(customer=self.customer)
project = self.connected_project
payload = self._get_valid_payload(service, project)
response = self.client.post(self.url, payload)
# the new service should not be visible to the user
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertDictContainsSubset(
{'service': ['Invalid hyperlink - Object does not exist.']}, response.data)
def test_user_cannot_revoke_service_and_project_permission_if_he_is_project_manager(self):
user = self.users['manager']
self.client.force_authenticate(user=user)
url = factories.OpenStackServiceProjectLinkFactory.get_url(self.service_project_link)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def _get_valid_payload(self, service=None, project=None):
return {
'service': factories.OpenStackServiceFactory.get_url(service),
'project': structure_factories.ProjectFactory.get_url(project)
}
| opennode/nodeconductor-openstack | src/waldur_openstack/openstack/tests/test_service_project_link.py | Python | mit | 3,839 |
#!/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Billy Olsen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from email.mime.text import MIMEText
from jinja2 import Environment, FileSystemLoader
from datetime import datetime as dt
import os
import six
import smtplib
# Get the directory for this file.
SECRET_SANTA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
j2env = Environment(loader=FileSystemLoader(SECRET_SANTA_DIR),
trim_blocks=False)
class SantaMail(object):
"""
The SantaMail object is used to send email. This class will load email
templates that should be sent out (the master list email and the email
for each Secret Santa.
Templates will be loaded from the template directory and is configurable
via the template_master and template_santa configuration variables.
"""
REQUIRED_PARAMS = ['author', 'email', 'smtp', 'username', 'password']
def __init__(self, author, email, smtp, username, password,
template_master="master.tmpl", template_santa="santa.tmpl"):
self.author = author
self.email = email
self.smtp = smtp
self.username = username
self.password = password
self.template_master = template_master
self.template_santa = template_santa
def send(self, pairings):
"""
Sends the emails out to the secret santa participants.
The secret santa host (the user configured to send the email from)
will receive a copy of the master list.
Each Secret Santa will receive an email with the contents of the
template_santa template.
"""
for pair in pairings:
self._send_to_secret_santa(pair)
self._send_master_list(pairings)
def _do_send(self, toaddr, body, subject):
try:
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = self.email
msg['To'] = toaddr
server = smtplib.SMTP(self.smtp)
server.starttls()
server.login(self.username, self.password)
server.sendmail(self.email, [toaddr], msg.as_string())
server.quit()
except:
print("Error sending email to %s!" % toaddr)
def _send_to_secret_santa(self, pair):
"""
Sends an email to the secret santa pairing.
"""
(giver, receiver) = pair
template = j2env.get_template(self.template_santa)
body = template.render(giver=giver, receiver=receiver)
year = dt.utcnow().year
subject = ('Your %s Farmer Family Secret Santa Match' % year)
self._do_send(giver.email, body, subject)
def _send_master_list(self, pairings):
"""
Sends an email to the game master.
"""
pair_list = []
for pair in pairings:
(giver, recipient) = pair
pair_list.append("%s -> %s" % (giver.name, recipient.name))
template = j2env.get_template(self.template_master)
body = template.render(pairs=pair_list)
year = dt.utcnow().year
subject = ('%s Farmer Family Secret Santa Master List' % year)
self._do_send(self.email, body, subject)
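if __name__ == "__main__":
    # Minimal illustrative wiring: every name, address and credential below is a
    # placeholder, and send() stays commented out so nothing is actually emailed.
    from collections import namedtuple
    Person = namedtuple("Person", ["name", "email"])
    pairings = [
        (Person("Alice", "alice@example.com"), Person("Bob", "bob@example.com")),
        (Person("Bob", "bob@example.com"), Person("Alice", "alice@example.com")),
    ]
    mailer = SantaMail(author="Alice", email="santa@example.com",
                       smtp="smtp.example.com", username="santa", password="secret")
    # mailer.send(pairings)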
| wolsen/secret-santa | secretsanta/mail.py | Python | mit | 4,308 |
# coding=utf-8
"""
Collect the elasticsearch stats for the local node
#### Dependencies
* urlib2
"""
import urllib2
import re
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import diamond.collector
RE_LOGSTASH_INDEX = re.compile('^(.*)-\d\d\d\d\.\d\d\.\d\d$')
class ElasticSearchCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(ElasticSearchCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'stats': "Available stats: \n"
+ " - jvm (JVM information) \n"
+ " - thread_pool (Thread pool information) \n"
+ " - indices (Individual index stats)\n",
'logstash_mode': "If 'indices' stats are gathered, remove "
+ "the YYYY.MM.DD suffix from the index name "
+ "(e.g. logstash-adm-syslog-2014.01.03) and use that "
+ "as a bucket for all 'day' index stats.",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ElasticSearchCollector, self).get_default_config()
config.update({
'host': '127.0.0.1',
'port': 9200,
'path': 'elasticsearch',
'stats': ['jvm', 'thread_pool', 'indices'],
'logstash_mode': False,
})
return config
def _get(self, path):
url = 'http://%s:%i/%s' % (
self.config['host'], int(self.config['port']), path)
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error("%s: %s", url, err)
return False
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as a"
+ " json object")
return False
def _copy_one_level(self, metrics, prefix, data, filter=lambda key: True):
for key, value in data.iteritems():
if filter(key):
metric_path = '%s.%s' % (prefix, key)
self._set_or_sum_metric(metrics, metric_path, value)
def _copy_two_level(self, metrics, prefix, data, filter=lambda key: True):
for key1, d1 in data.iteritems():
self._copy_one_level(metrics, '%s.%s' % (prefix, key1), d1, filter)
def _index_metrics(self, metrics, prefix, index):
if self.config['logstash_mode']:
"""Remove the YYYY.MM.DD bit from logstash indices.
            This way we keep using the same metric naming and not pollute
our metrics system (e.g. Graphite) with new metrics every day."""
m = RE_LOGSTASH_INDEX.match(prefix)
if m:
prefix = m.group(1)
                # keep a tally of the number of indexes
self._set_or_sum_metric(metrics,
'%s.indexes_in_group' % prefix, 1)
self._add_metric(metrics, '%s.docs.count' % prefix, index,
['docs', 'count'])
self._add_metric(metrics, '%s.docs.deleted' % prefix, index,
['docs', 'deleted'])
self._add_metric(metrics, '%s.datastore.size' % prefix, index,
['store', 'size_in_bytes'])
# publish all 'total' and 'time_in_millis' stats
self._copy_two_level(
metrics, prefix, index,
lambda key: key.endswith('total') or key.endswith('time_in_millis'))
def _add_metric(self, metrics, metric_path, data, data_path):
"""If the path specified by data_path (a list) exists in data,
add to metrics. Use when the data path may not be present"""
current_item = data
for path_element in data_path:
current_item = current_item.get(path_element)
if current_item is None:
return
self._set_or_sum_metric(metrics, metric_path, current_item)
def _set_or_sum_metric(self, metrics, metric_path, value):
"""If we already have a datapoint for this metric, lets add
the value. This is used when the logstash mode is enabled."""
if metric_path in metrics:
metrics[metric_path] += value
else:
metrics[metric_path] = value
def collect(self):
if json is None:
self.log.error('Unable to import json')
return {}
result = self._get('_nodes/_local/stats?all=true')
if not result:
return
metrics = {}
node = result['nodes'].keys()[0]
data = result['nodes'][node]
#
# http connections to ES
metrics['http.current'] = data['http']['current_open']
#
# indices
indices = data['indices']
metrics['indices.docs.count'] = indices['docs']['count']
metrics['indices.docs.deleted'] = indices['docs']['deleted']
metrics['indices.datastore.size'] = indices['store']['size_in_bytes']
transport = data['transport']
metrics['transport.rx.count'] = transport['rx_count']
metrics['transport.rx.size'] = transport['rx_size_in_bytes']
metrics['transport.tx.count'] = transport['tx_count']
metrics['transport.tx.size'] = transport['tx_size_in_bytes']
# elasticsearch < 0.90RC2
if 'cache' in indices:
cache = indices['cache']
self._add_metric(metrics, 'cache.bloom.size', cache,
['bloom_size_in_bytes'])
self._add_metric(metrics, 'cache.field.evictions', cache,
['field_evictions'])
self._add_metric(metrics, 'cache.field.size', cache,
['field_size_in_bytes'])
metrics['cache.filter.count'] = cache['filter_count']
metrics['cache.filter.evictions'] = cache['filter_evictions']
metrics['cache.filter.size'] = cache['filter_size_in_bytes']
self._add_metric(metrics, 'cache.id.size', cache,
['id_cache_size_in_bytes'])
# elasticsearch >= 0.90RC2
if 'filter_cache' in indices:
cache = indices['filter_cache']
metrics['cache.filter.evictions'] = cache['evictions']
metrics['cache.filter.size'] = cache['memory_size_in_bytes']
self._add_metric(metrics, 'cache.filter.count', cache, ['count'])
# elasticsearch >= 0.90RC2
if 'id_cache' in indices:
cache = indices['id_cache']
self._add_metric(metrics, 'cache.id.size', cache,
['memory_size_in_bytes'])
# elasticsearch >= 0.90
if 'fielddata' in indices:
fielddata = indices['fielddata']
self._add_metric(metrics, 'fielddata.size', fielddata,
['memory_size_in_bytes'])
self._add_metric(metrics, 'fielddata.evictions', fielddata,
['evictions'])
#
# process mem/cpu (may not be present, depending on access restrictions)
self._add_metric(metrics, 'process.cpu.percent', data,
['process', 'cpu', 'percent'])
self._add_metric(metrics, 'process.mem.resident', data,
['process', 'mem', 'resident_in_bytes'])
self._add_metric(metrics, 'process.mem.share', data,
['process', 'mem', 'share_in_bytes'])
self._add_metric(metrics, 'process.mem.virtual', data,
['process', 'mem', 'total_virtual_in_bytes'])
#
# filesystem (may not be present, depending on access restrictions)
if 'fs' in data and 'data' in data['fs'] and data['fs']['data']:
fs_data = data['fs']['data'][0]
self._add_metric(metrics, 'disk.reads.count', fs_data,
['disk_reads'])
self._add_metric(metrics, 'disk.reads.size', fs_data,
['disk_read_size_in_bytes'])
self._add_metric(metrics, 'disk.writes.count', fs_data,
['disk_writes'])
self._add_metric(metrics, 'disk.writes.size', fs_data,
['disk_write_size_in_bytes'])
#
# jvm
if 'jvm' in self.config['stats']:
jvm = data['jvm']
mem = jvm['mem']
for k in ('heap_used', 'heap_committed', 'non_heap_used',
'non_heap_committed'):
metrics['jvm.mem.%s' % k] = mem['%s_in_bytes' % k]
for pool, d in mem['pools'].iteritems():
pool = pool.replace(' ', '_')
metrics['jvm.mem.pools.%s.used' % pool] = d['used_in_bytes']
metrics['jvm.mem.pools.%s.max' % pool] = d['max_in_bytes']
metrics['jvm.threads.count'] = jvm['threads']['count']
gc = jvm['gc']
collection_count = 0
collection_time_in_millis = 0
for collector, d in gc['collectors'].iteritems():
metrics['jvm.gc.collection.%s.count' % collector] = d[
'collection_count']
collection_count += d['collection_count']
metrics['jvm.gc.collection.%s.time' % collector] = d[
'collection_time_in_millis']
collection_time_in_millis += d['collection_time_in_millis']
# calculate the totals, as they're absent in elasticsearch > 0.90.10
if 'collection_count' in gc:
metrics['jvm.gc.collection.count'] = gc['collection_count']
else:
metrics['jvm.gc.collection.count'] = collection_count
k = 'collection_time_in_millis'
if k in gc:
metrics['jvm.gc.collection.time'] = gc[k]
else:
metrics['jvm.gc.collection.time'] = collection_time_in_millis
#
# thread_pool
if 'thread_pool' in self.config['stats']:
self._copy_two_level(metrics, 'thread_pool', data['thread_pool'])
#
# network
self._copy_two_level(metrics, 'network', data['network'])
if 'indices' in self.config['stats']:
#
# individual index stats
result = self._get('_stats?clear=true&docs=true&store=true&'
+ 'indexing=true&get=true&search=true')
if not result:
return
_all = result['_all']
self._index_metrics(metrics, 'indices._all', _all['primaries'])
if 'indices' in _all:
indices = _all['indices']
elif 'indices' in result: # elasticsearch >= 0.90RC2
indices = result['indices']
else:
return
for name, index in indices.iteritems():
self._index_metrics(metrics, 'indices.%s' % name,
index['primaries'])
for key in metrics:
self.publish(key, metrics[key])
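# Illustrative Diamond collector configuration for this module (the file lives
# wherever the local Diamond install keeps per-collector configs, for example
# /etc/diamond/collectors/ElasticSearchCollector.conf; all values are examples):
#
#   enabled = True
#   host = 127.0.0.1
#   port = 9200
#   stats = jvm, thread_pool, indices
#   logstash_mode = False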
| metamx/Diamond | src/collectors/elasticsearch/elasticsearch.py | Python | mit | 11,323 |
# Inspired from VecEnv from OpenAI Baselines
import logging
# Module-level logger used by VecEnv.render() below.
logger = logging.getLogger(__name__)
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
raise NotImplementedError()
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
raise NotImplementedError()
def close(self):
"""
Clean up the environments' resources.
"""
raise NotImplementedError()
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
logger.warn('Render not defined for %s' % self)
def seed(self, i):
raise NotImplementedError()
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
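if __name__ == "__main__":
    # Tiny illustrative concrete VecEnv (hypothetical, for demonstration only):
    # each "environment" just echoes the action back as its next observation.
    class EchoVecEnv(VecEnv):
        def __init__(self, num_envs):
            VecEnv.__init__(self, num_envs, observation_space=None, action_space=None)
            self._actions = None
        def reset(self):
            return [0] * self.num_envs
        def step_async(self, actions):
            self._actions = actions
        def step_wait(self):
            obs = list(self._actions)
            rewards = [0.0] * self.num_envs
            dones = [False] * self.num_envs
            infos = [{} for _ in range(self.num_envs)]
            return obs, rewards, dones, infos
        def close(self):
            pass
    venv = EchoVecEnv(num_envs=2)
    print(venv.reset())       # [0, 0]
    print(venv.step([1, 2]))  # step() = step_async() followed by step_wait()
    venv.close()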
| matthiasplappert/keras-rl | rl/common/vec_env/__init__.py | Python | mit | 2,310 |
EGA2RGB = [
(0x00, 0x00, 0x00),
(0x00, 0x00, 0xAA),
(0x00, 0xAA, 0x00),
(0x00, 0xAA, 0xAA),
(0xAA, 0x00, 0x00),
(0xAA, 0x00, 0xAA),
(0xAA, 0x55, 0x00),
(0xAA, 0xAA, 0xAA),
(0x55, 0x55, 0x55),
(0x55, 0x55, 0xFF),
(0x55, 0xFF, 0x55),
(0x55, 0xFF, 0xFF),
(0xFF, 0x55, 0x55),
(0xFF, 0x55, 0xFF),
(0xFF, 0xFF, 0x55),
(0xFF, 0xFF, 0xFF),
]
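# Each byte of SHAPES.EGA packs two 4-bit pixels: divmod(byte, 16) below splits it
# into the high and low nibble, which are appended in that order, and each nibble
# indexes the 16-colour palette above (e.g. 0x1F decodes to colours 1 and 15,
# i.e. blue followed by white).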
def load_shapes():
shapes = []
bytes = open("ULT/SHAPES.EGA").read()
for i in range(256):
shape = []
for j in range(16):
for k in range(8):
d = ord(bytes[k + 8 * j + 128 * i])
a, b = divmod(d, 16)
shape.append(EGA2RGB[a])
shape.append(EGA2RGB[b])
shapes.append(shape)
return shapes
| jtauber/ultima4 | shapes.py | Python | mit | 800 |
from django.apps import AppConfig
class BallerShotCallerConfig(AppConfig):
name = 'baller_shot_caller'
| kizzen/Baller-Shot-Caller | web_site/baller_shot_caller/apps.py | Python | mit | 109 |
import json
from util import d
import os
__home = os.path.expanduser("~").replace('\\', '/') + "/PixelWeb/"
BASE_SERVER_CONFIG = d({
"id":"server_config",
"display": "server_config",
"preconfig": False,
"presets":[],
"params": [{
"id": "external_access",
"label": "Allow External Access",
"type": "bool",
"default": True,
"help":"On: Other computers on your network can access PixelWeb. Off: LocalHost access only."
},{
"id": "port",
"label": "Server Port",
"type": "int",
"default": 8080,
"help":"Port to listen on."
},{
"id": "load_defaults",
"label": "Load Last Config on Start",
"type": "bool",
"default": False,
"help":"Load last driver/controller configuration on application start."
},
{
"id": "show_debug",
"label": "Show Debug in Console",
"type": "bool",
"default": False,
"help":"Show BiblioPixel debug in server console (not in main UI)."
},{
"id": "mod_dirs",
"label": "Module Directories",
"type": "str_multi",
"default": [],
"help":"Directories from which to load modules (animations, drivers, controllers, pre-configs).",
"replace": {"\\":"/"}
},
{
"id": "off_anim_time",
"label": "All Off Timeout",
"type": "int",
"default": 10,
"min": 0,
"max": 3600,
"help":"Keep display off when not running an animation by actively turning all pixels off every X seconds. Set to 0 to disable."
},]
});
def setHome(home):
global __home
__home = home
def genDefaultConfig(params):
c = {}
for p in params:
p = d(p)
c[p.id] = p.default
return c
def initConfig():
try:
if not os.path.exists(__home):
print "Creating {}".format(__home)
os.makedirs(__home)
except:
print "Failed to initialize PixelWeb config!"
def readConfig(file, key = None, path=None):
if not path:
path = __home
data = {}
try:
with open(path + "/" + file + ".json", "r") as fp:
data = json.load(fp, encoding='utf-8')
if key:
if key in data:
data = data[key]
else:
data = {}
except Exception, e:
pass
return d(data)
def writeConfig(file, data, key = None, path=None):
if not path:
path = __home
base = data
if key:
base = readConfig(file, path=path)
base[key] = data
with open(path + "/" + file + ".json", "w") as fp:
json.dump(base, fp, indent=4, sort_keys=True)
def paramsToDict(params):
data = {}
for p in params:
if "default" not in p:
p.default = None
data[p.id] = p.default
return data
def readServerConfig():
data = readConfig("config", path=__home)
base = paramsToDict(BASE_SERVER_CONFIG.params)
if len(data.keys()) == 0:
data = paramsToDict(BASE_SERVER_CONFIG.params)
elif len(data.keys()) != len(base.keys()):
data.upgrade(base)
return d(data)
def writeServerConfig(data):
writeConfig("config", data)
def upgradeServerConfig():
b = genDefaultConfig(BASE_SERVER_CONFIG.params)
cfg = readServerConfig()
cfg.upgrade(b)
writeServerConfig(cfg)
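# Illustrative round trip (key names are just examples; each config is stored
# under the PixelWeb home directory as <name>.json):
#
#     initConfig()
#     cfg = readServerConfig()   # defaults merged in from BASE_SERVER_CONFIG
#     cfg["port"] = 9090
#     writeServerConfig(cfg)     # persisted to <home>/config.json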
| ManiacalLabs/PixelWeb | pixelweb/config.py | Python | mit | 3,791 |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 29 21:25:13 2014
@author: 27182_000
"""
# A palindromic number reads the same both ways. The largest palindrome made
# from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
import sys
ans = 1
for n in range(999,1,-1):
for m in range(999,1,-1):
num = n*m
if str(num) == str(num)[::-1] and num > ans:
ans = num
print ans
 | ecotner/Learning-to-fly | Project Euler Python/Problem 04/problem4.py | Python | mit | 485 |
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse('Page content')
def custom(request):
return render(request, 'custom.html', {})
| geelweb/geelweb-django-contactform | tests/views.py | Python | mit | 202 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('content', '0009_auto_20150829_1417'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('msg_subject', models.CharField(max_length=255, verbose_name='Subject')),
('msg_text', models.TextField(verbose_name='Text')),
('msg_author', models.EmailField(max_length=75, verbose_name='From')),
('recv_date', models.DateTimeField(editable=False, verbose_name='Date Received', default=datetime.datetime(2015, 10, 19, 4, 10, 29, 712166, tzinfo=utc))),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='event',
name='pub_date',
field=models.DateTimeField(editable=False, verbose_name='Date Published', default=datetime.datetime(2015, 10, 19, 4, 10, 29, 711232, tzinfo=utc)),
preserve_default=True,
),
migrations.AlterField(
model_name='post',
name='pub_date',
field=models.DateTimeField(editable=False, verbose_name='Date Published', default=datetime.datetime(2015, 10, 19, 4, 10, 29, 711716, tzinfo=utc)),
preserve_default=True,
),
]
| sfowl/fowllanguage | content/migrations/0010_auto_20151019_1410.py | Python | mit | 1,608 |
'''
Manage Ruby gem packages. (see https://rubygems.org/ )
'''
from pyinfra.api import operation
from pyinfra.facts.gem import GemPackages
from .util.packaging import ensure_packages
@operation
def packages(packages=None, present=True, latest=False, state=None, host=None):
'''
Add/remove/update gem packages.
+ packages: list of packages to ensure
+ present: whether the packages should be installed
+ latest: whether to upgrade packages without a specified version
Versions:
Package versions can be pinned like gem: ``<pkg>:<version>``.
Example:
.. code:: python
# Note: Assumes that 'gem' is installed.
gem.packages(
name='Install rspec',
packages=['rspec'],
)
'''
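    # Pinned-version example (illustrative): packages=['rspec:3.12.0'] would
    # request exactly that gem version, using the ':' separator described above.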
yield ensure_packages(
host, packages, host.get_fact(GemPackages), present,
install_command='gem install',
uninstall_command='gem uninstall',
upgrade_command='gem update',
version_join=':',
latest=latest,
)
| Fizzadar/pyinfra | pyinfra/operations/gem.py | Python | mit | 1,033 |
# -*- coding: utf-8 -*-
#
# partpy documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 16 18:56:06 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'partpy'
copyright = u'2013, Taylor "Nekroze" Lawson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'partpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'partpy.tex', u'partpy Documentation',
u'Taylor "Nekroze" Lawson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'partpy', u'partpy Documentation',
[u'Taylor "Nekroze" Lawson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'partpy', u'partpy Documentation',
u'Taylor "Nekroze" Lawson', 'partpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'partpy'
epub_author = u'Taylor "Nekroze" Lawson'
epub_publisher = u'Taylor "Nekroze" Lawson'
epub_copyright = u'2013, Taylor "Nekroze" Lawson'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| Nekroze/partpy | docs/conf.py | Python | mit | 9,097 |
import pytest
from clustaar.authorize.conditions import TrueCondition
@pytest.fixture
def condition():
return TrueCondition()
class TestCall(object):
def test_returns_true(self, condition):
assert condition({})
| Clustaar/clustaar.authorize | tests/authorize/conditions/test_true_condition.py | Python | mit | 231 |
# user.py is the autostart code for a ulnoiot node.
# Configure your devices, sensors and local interaction here.
# Always start with this to make everything from ulnoiot available.
# Therefore, do not delete the following line.
from ulnoiot import *
# The following is just example code, adjust to your needs accordingly.
# wifi and mqtt connect are done automatically, we assume for this example
# the following configuration.
# mqtt("ulnoiotgw", "myroom/test1")
## Use some shields
# The onboard-led is always available.
# With this configuration it will report under myroom/test1/blue
# and can be set via sending off or on to myroom/test1/blue/test.
from ulnoiot.shield.onboardled import blue
blue.high() # make sure it's off (it's reversed)
## Add some other devices
# Add a button with a slightly higher debounce rate, which will report
# in the topic myroom/test1/button1.
button("b1", d6, pullup=False, threshold=2)
# Count rising signals on d2=Pin(4) and
# report number counted at myroom/test1/shock1.
# trigger("shock1",Pin(4))
## Start to transmit every 10 seconds (or when status changed).
# Don't forget the run-command at the end.
run(5)
| ulno/micropython-extra-ulno | examples/integriot_test/testnode1/files/autostart.py | Python | mit | 1,163 |
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from . import common
from .. import rw
from ..glossary import DEFAULT_TIMEOUT
from .base import BaseMessage
class CancelMessage(BaseMessage):
__slots__ = BaseMessage.__slots__ + (
'ttl',
'tracing',
'why',
)
def __init__(self, ttl=DEFAULT_TIMEOUT, tracing=None, why=None, id=0):
super(CancelMessage, self).__init__(id)
self.ttl = ttl
self.tracing = tracing or common.Tracing(0, 0, 0, 0)
self.why = why or ''
cancel_rw = rw.instance(
CancelMessage,
('ttl', rw.number(4)), # ttl:4
('tracing', common.tracing_rw), # tracing:24
('why', rw.len_prefixed_string(rw.number(2))), # why:2
)
| Willyham/tchannel-python | tchannel/messages/cancel.py | Python | mit | 1,854 |
# coding: utf-8
import pygame
import os
from color import *
from pygame.locals import *
class Score(pygame.sprite.Sprite):
def __init__(self, score, player, width, height):
        pygame.sprite.Sprite.__init__(self)
self.score = int(score)
self.color = None
self.player = player
self.bossHeight = height
self.bossWidth = width
self.size = 70
self.update()
def update(self):
self.score = int(self.score)
self.whatColor()
self.score = str(self.score)
scoreFont = pygame.font.Font('./fonts/Dearest.ttf', self.size)
        # The score is cast to int for the comparisons in 'self.whatColor',
        # then back to str because 'scoreFont.render' only accepts strings.
self.surface = scoreFont.render(self.score, True, self.color)
self.rect = self.surface.get_rect()
if self.player == 1:
self.rect.center = (55, self.bossHeight - 50)
elif self.player == -1:
self.rect.center = (self.bossWidth - 55, self.bossHeight - 50)
def whatColor(self):
self.size = 80
if self.score < 6:
self.color = white
elif self.score < 8:
self.color = aqua
elif self.score < 10:
self.color = blueGreen
else:
self.color = lime
self.size = 100
def updateScore(self, score):
self.score = score
def __repr__(self):
return "<Score de ", str(self.player), "= ", str(self.score)
| Ilphrin/TuxleTriad | Score.py | Python | mit | 1,523 |
from __future__ import unicode_literals
from django.db import models
from django.utils.timezone import now, timedelta
Q = models.Q
class LogisticJob(models.Model):
LOCK_FOR = (
(60*15, '15 minutes'),
(60*30, '30 minutes'),
(60*45, '45 minutes'),
(60*60, '1 hour'),
(60*60*3, '3 hours'),
(60*60*6, '6 hours'),
(60*60*9, '9 hours'),
(60*60*12, '12 hours'),
(60*60*18, '18 hours'),
(60*60*24, '24 hours'),
)
RESOURCE = (
('wood', 'Wood'),
('stone', 'Stone'),
('food', 'Food'),
# ('cole', 'Cole'),
)
SPEED = (
        ('-1', 'No horses'),
        ('1001', 'Gold horses (test)'),
        ('1004', 'Ruby horses 1 (test)'),
        ('1007', 'Ruby horses 2 (test)'),
)
player = models.ForeignKey("gge_proxy_manager.Player", related_name='logistic_jobs')
castle = models.ForeignKey("gge_proxy_manager.Castle", related_name='outgoing_logistic_jobs')
receiver = models.ForeignKey("gge_proxy_manager.Castle", related_name='incoming_logistic_jobs')
speed = models.CharField(max_length=5, choices=SPEED)
is_active = models.BooleanField(default=True)
resource = models.CharField(max_length=6, choices=RESOURCE)
gold_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
resource_limit = models.PositiveIntegerField()
lock_for = models.PositiveIntegerField(choices=LOCK_FOR, default=60*45)
locked_till = models.DateTimeField(default=now, db_index=True)
class Meta:
app_label = 'gge_proxy_manager'
def delay(self):
self.locked_till = now() + timedelta(seconds=self.lock_for)
self.save()
def last_succeed(self):
from .log import LogisticLog
log = LogisticLog.objects.filter(castle=self.castle,
receiver=self.receiver,
resource=self.resource).order_by('-sent').first()
if log:
return log.sent
return None
class ProductionJob(models.Model):
player = models.ForeignKey("gge_proxy_manager.Player", related_name='production_jobs')
castle = models.ForeignKey("gge_proxy_manager.Castle", related_name='production_jobs')
unit = models.ForeignKey("gge_proxy_manager.Unit")
valid_until = models.PositiveIntegerField(null=True, blank=True, default=None,
help_text='Bis zu welcher Menge ist der Auftrag gueltig')
is_active = models.BooleanField(default=True)
gold_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
food_balance_limit = models.IntegerField(null=True, blank=True, default=None)
wood_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
stone_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
burst_mode = models.BooleanField(default=False, help_text='Ignoriert Nahrungsbilanz')
locked_till = models.DateTimeField(default=now, db_index=True)
last_fault_reason = models.CharField(null=True, default=None, max_length=128)
last_fault_date = models.DateTimeField(default=None, null=True)
class Meta:
app_label = 'gge_proxy_manager'
def last_succeed(self):
from .log import ProductionLog
log = ProductionLog.objects.filter(castle=self.castle, unit=self.unit).order_by('-produced').first()
if log:
return log.produced
        return None
| mrcrgl/gge-storage | gge_proxy_manager/models/jobs.py | Python | mit | 3,505 |
"""Parse ISI journal abbreviations website."""
# Copyright (c) 2012 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
class ISIJournalParser(HTMLParser):
"""Parser for ISI Web of Knowledge journal abbreviation pages.
**Note:**
Due to the ISI pages containing malformed html one must call
the :py:meth:`ISIJournalParser.finalize` method once
parsing is complete to ensure all entries are read correctly.
"""
def __init__(self):
HTMLParser.__init__(self)
self.journal_names = []
self.journal_abbreviations = []
self.parser_state = None
self.data_entities = None
def handle_starttag(self, tag, attrs):
if tag not in ('dd', 'dt'):
return
self._storedata()
self.parser_state = tag
self.data_entities = []
def handle_data(self, data):
if self.parser_state in ('dd', 'dt'):
self.data_entities.append(data)
def _storedata(self):
if self.data_entities and self.parser_state:
if self.parser_state == 'dt':
self.journal_names.append(''.join(self.data_entities).strip())
elif self.parser_state == 'dd':
self.journal_abbreviations.append(''.join(self.data_entities).strip())
def finalize(self):
"""Ensures all data is stored.
This method must be called when parsing is complete.
"""
self._storedata()
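# Minimal usage sketch (the sample markup below only mimics the <dt>/<dd>
# structure of the ISI pages; it is not real ISI output):
if __name__ == '__main__':
    sample = '<DL><DT>JOURNAL OF CLIMATE<DD>J CLIMATE<DT>NATURE<DD>NATURE</DL>'
    parser = ISIJournalParser()
    parser.feed(sample)
    parser.finalize()  # required because the real pages contain malformed html
    for name, abbrev in zip(parser.journal_names, parser.journal_abbreviations):
        print(name + ' -> ' + abbrev)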
| ajdawson/jabr | lib/parser.py | Python | mit | 2,590 |
import glob
import os
import pandas as pd
class CTD(object):
"""docstring for CTD"""
def __init__(self):
self.format_l = []
self.td_l = []
self.iternum = 0
self.formatname = ""
def feature(self,index):
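        # Relative change in the high price plus relative change in volume
        # between rows index+1 and index+3 (columns after dropping the date:
        # open, high, low, close, volume, adj close).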
format_l = self.format_l
feature = ((float(format_l[index+1][1])-float(format_l[index+3][1]))/float(format_l[index+1][1]))+((float(format_l[index+1][4])-float(format_l[index+3][4]))/float(format_l[index+1][4]))
if (feature == 0):
feature = 0.0001
return feature
def format(self,path):
a = path.split('/')
self.formatname = a[2]
with open(path, 'r') as f:
a = f.read()
f = a.split('\n')
f.pop(0)
self.iternum = len(f)-3
for a in range(len(f)):
a = f[a].split(',')
a.pop(0)
self.format_l.append(a)
def trainData(self):
for index in range(self.iternum):
try:
format_l = self.format_l
classify = (float(format_l[index][3])-float(format_l[index+1][3]))/float(format_l[index+1][3])*100
feature = self.feature(index)
a = ['0']+format_l[index+1]+format_l[index+2]+format_l[index+3]+[feature]
self.td_l.append(a)
except:
pass
def storage_csv(self):
rowname=['classify','feature','1-open','1-high','1-low','1-close','1-volume','1-adj close','2-open','2-high','2-low','2-close','2-volume','2-adj close','3-open','3-high','3-low','3-close','3-volume','3-adj close']
df = pd.DataFrame(self.td_l,columns=rowname)
with open('./traindata/td_'+self.formatname+'.csv', 'w') as f:
df.to_csv(f)
        print('td_'+self.formatname+'.csv is created!')
def storage_txt(self,pathname):
with open('./predict/data/'+pathname,'ab') as f:
for a in self.td_l:
b = str(a[0])+'\t'
for c in range(1,20):
d = str(c)+':'+str(a[c])+'\t'
b += d
f.write(b+'\n')
def run(self):
path = './stock/*'
paths=glob.glob(path)
for index,path in enumerate(paths,1):
print(index)
self.format_l = []
self.td_l = []
self.format(path)
self.trainData()
path = path.split('/')
pathname = path[2]
self.storage_txt(pathname)
print os.popen("./bin/svm-scale -s predict_scale_model ./predict/data/"+pathname+" > ./predict/scale/"+pathname+"predict_data.scale").read()
print os.popen("./bin/rvkde --best --predict --classify -v ./train/scale/"+pathname+"train_data.scale -V ./predict/scale/"+pathname+"predict_data.scale > ./predict/result/"+pathname+"predict_result").read()
def main():
ctd = CTD()
ctd.run()
if __name__ == '__main__' :
main()
| wy36101299/NCKU_Machine-Learning-and-Bioinformatics | hw4_predictData/creatPredictdata.py | Python | mit | 2,986 |
"""
Running the template pre-processor standalone.
Input: Templated Antimony model (stdin)
Output: Expanded Antimony model (stdout)
"""
import fileinput
import os
import sys
directory = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(directory, "TemplateSB")
sys.path.append(path)
from template_processor import TemplateProcessor
template_stg = ''
for line in fileinput.input():
template_stg += "\n" + line
processor = TemplateProcessor(template_stg)
expanded_stg = processor.do()
sys.stdout.write(expanded_stg)
| BioModelTools/TemplateSB | run.py | Python | mit | 543 |
from bottle import route, default_app
app = default_app()
data = {
"id": 78874,
"seriesName": "Firefly",
"aliases": [
"Serenity"
],
"banner": "graphical/78874-g3.jpg",
"seriesId": "7097",
"status": "Ended",
"firstAired": "2002-09-20",
"network": "FOX (US)",
"networkId": "",
"runtime": "45",
"genre": [
"Drama",
"Science-Fiction"
],
"overview": "In the far-distant future, Captain Malcolm \"Mal\" Reynolds is a renegade former brown-coat sergeant, now turned smuggler & rogue, "
"who is the commander of a small spacecraft, with a loyal hand-picked crew made up of the first mate, Zoe Warren; the pilot Hoban \"Wash\" Washburn; "
"the gung-ho grunt Jayne Cobb; the engineer Kaylee Frye; the fugitives Dr. Simon Tam and his psychic sister River. "
"Together, they travel the far reaches of space in search of food, money, and anything to live on.",
"lastUpdated": 1486759680,
"airsDayOfWeek": "",
"airsTime": "",
"rating": "TV-14",
"imdbId": "tt0303461",
"zap2itId": "EP00524463",
"added": "",
"addedBy": None,
"siteRating": 9.5,
"siteRatingCount": 472,
}
@route('/api')
def api():
return data
| romanvm/WsgiBoostServer | benchmarks/test_app.py | Python | mit | 1,229 |
#!/usr/bin/env python
# A Raspberry Pi GPIO based relay device
import RPi.GPIO as GPIO
from common.adafruit.Adafruit_MCP230xx.Adafruit_MCP230xx import Adafruit_MCP230XX
class Relay(object):
_mcp23017_chip = {} # Conceivably, we could have up to 8 of these as there are a possibility of 8 MCP chips on a bus.
def __init__(self, mcp_pin, i2c_address=0x27):
"""
Initialize a relay
:param mcp_pin: BCM gpio number that is connected to a relay
:return:
"""
self.ON = 0
self.OFF = 1
self._i2c_address = i2c_address
self._mcp_pin = mcp_pin
if GPIO.RPI_REVISION == 1:
i2c_busnum = 0
else:
i2c_busnum = 1
        if self._i2c_address not in self._mcp23017_chip:
self._mcp23017_chip[self._i2c_address] = Adafruit_MCP230XX(busnum=i2c_busnum, address=self._i2c_address, num_gpios=16)
self._relay = self._mcp23017_chip[self._i2c_address]
self._relay.config(self._mcp_pin, self._relay.OUTPUT)
self._relay.output(self._mcp_pin, self.OFF)
self.state = self.OFF
def set_state(self, state):
"""
Set the state of the relay. relay.ON, relay.OFF
:param state:
:return:
"""
if state == self.ON:
self._relay.output(self._mcp_pin, self.ON)
self.state = self.ON
elif state == self.OFF:
self._relay.output(self._mcp_pin, self.OFF)
self.state = self.OFF
def toggle(self):
"""
Toggle the state of a relay
:return:
"""
if self.state == self.ON:
self._relay.output(self._mcp_pin, self.OFF)
self.state = self.OFF
else:
self._relay.output(self._mcp_pin, self.ON)
self.state = self.ON
def get_state(self):
return self.state
if __name__ == '__main__':
import time
pause = .15
for pin in range(16):
print("Pin: %s" % pin)
r = Relay(pin)
r.set_state(r.ON)
time.sleep(pause)
r.set_state(r.OFF)
time.sleep(pause)
r.toggle()
time.sleep(pause)
r.toggle()
time.sleep(pause)
r1 = Relay(10)
r2 = Relay(2)
r3 = Relay(15)
r1.set_state(r1.ON)
print(r1._mcp_pin)
r2.set_state(r2.ON)
print(r2._mcp_pin)
r3.set_state(r3.ON)
print(r3._mcp_pin)
time.sleep(1)
r1.set_state(r1.OFF)
r2.set_state(r2.OFF)
    r3.set_state(r3.OFF)
| mecworks/garden_pi | common/relay.py | Python | mit | 2,522 |
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.bcrypt import Bcrypt
from flask_sockets import Sockets
app = Flask(__name__, static_folder="../static/dist", template_folder="../static")
if os.environ.get('PRODUCTION'):
app.config.from_object('config.ProductionConfig')
else:
app.config.from_object('config.TestingConfig')
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
sockets = Sockets(app)
| mortbauer/webapp | application/__init__.py | Python | mit | 444 |
# Generated by Django 2.2.12 on 2020-08-23 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job_board', '0004_jobpost_is_from_recruiting_agency'),
]
operations = [
migrations.AlterField(
model_name='jobpost',
name='location',
field=models.CharField(choices=[('CH', 'Chicago'), ('CT', 'Chicago and Temporarily Remote'), ('CR', 'Chicago and Remote'), ('RO', 'Remote Only')], default='CH', help_text='ChiPy is a locally based group. Position must not move candidate out of the Chicago area. Working remote or commuting is acceptable. Any position requiring relocation out of the Chicago land is out of scope of the mission of the group.', max_length=2),
),
]
| chicagopython/chipy.org | chipy_org/apps/job_board/migrations/0005_auto_20200823_0726.py | Python | mit | 793 |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import tree
from subprocess import call
# https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.names
#
# TODO: Load up the mushroom dataset into dataframe 'X'
# Verify you did it properly.
# Indices shouldn't be doubled.
# Header information is on the dataset's website at the UCI ML Repo
# Check NA Encoding
X = pd.read_csv('Datasets/agaricus-lepiota.data', names=['label', 'cap-shape', 'cap-surface', 'cap-color',
'bruises', 'odor', 'gill-attachment',
'gill-spacing', 'gill-size', 'gill-color',
'stalk-shape', 'stalk-root',
'stalk-surface-above-ring',
'stalk-surface-below-ring', 'stalk-color-above-ring',
'stalk-color-below-ring', ' veil-type', 'veil-color',
'ring-number', 'ring-type', 'spore-print-colo', 'population',
'habitat'], header=None)
# INFO: An easy way to show which rows have nans in them
# print X[pd.isnull(X).any(axis=1)]
#
# TODO: Go ahead and drop any row with a nan
X.replace(to_replace='?', value=np.NaN, inplace=True)
X.dropna(axis=0, inplace=True)
print(X.shape)
#
# TODO: Copy the labels out of the dset into variable 'y' then Remove
# them from X. Encode the labels, using the .map() trick we showed
# you in Module 5 -- canadian:0, kama:1, and rosa:2
X['label'] = X['label'].map({'e': 1, 'p': 0})
y = X['label'].copy()
X.drop(labels=['label'], axis=1, inplace=True)
#
# TODO: Encode the entire dataset using dummies
X = pd.get_dummies(X)
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7
# Use variable names: X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
#
# TODO: Create an DT classifier. No need to set any parameters
model = tree.DecisionTreeClassifier()
#
# TODO: train the classifier on the training data / labels:
# TODO: score the classifier on the testing data / labels:
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print('High-Dimensionality Score: %f' % round((score * 100), 3))
#
# TODO: Use the code on the course's SciKit-Learn page to output a .DOT file
# Then render the .DOT to .PNGs. Ensure you have graphviz installed.
# If not, `brew install graphviz`. If you can't, use: http://webgraphviz.com/
tree.export_graphviz(model.tree_, out_file='tree.dot', feature_names=X.columns)
| Wittlich/DAT210x-Python | Module6/assignment5.py | Python | mit | 2,871 |
import os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class DBConnector():
    '''
    Provides SQLAlchemy sessions for the configured database, where every row
    is the details of what one employee was paid for an entire month.
    '''
@classmethod
def get_session(cls):
database_path = os.environ["SQL_DATABASE"]
engine = create_engine(database_path)
session = sessionmaker(bind=engine)()
return session
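# Usage sketch (assumes the SQL_DATABASE environment variable holds a
# SQLAlchemy URL, e.g. 'sqlite:///example.db'):
#   session = DBConnector.get_session()
#   rows = session.query(SomeModel).all()  # SomeModel is hypothetical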
| JasonThomasData/payslip_code_test | app/models/db_connector.py | Python | mit | 498 |
#!env /usr/bin/python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
| jadams/rarbg-get | rarbg-get.py | Python | mit | 409 |
"""
Module containing classes for HTTP client/server interactions
"""
# Python 2.x/3.x compatibility imports
try:
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
except ImportError:
from urllib2 import HTTPError, URLError
from urllib import urlencode
import socket
from pyowm.exceptions import api_call_error, unauthorized_error, not_found_error
from pyowm.webapi25.configuration25 import ROOT_API_URL
class WeatherHttpClient(object):
API_SUBSCRIPTION_SUBDOMAINS = {
'free': 'api',
'pro': 'pro'
}
"""
An HTTP client class for the OWM web API. The class can leverage a
caching mechanism
:param API_key: a Unicode object representing the OWM web API key
:type API_key: Unicode
:param cache: an *OWMCache* concrete instance that will be used to
cache OWM web API responses.
:type cache: an *OWMCache* concrete instance
:param subscription_type: the type of OWM web API subscription to be wrapped.
The value is used to pick the proper API subdomain for HTTP calls.
Defaults to: 'free'
:type subscription_type: str
"""
def __init__(self, API_key, cache, subscription_type='free'):
self._API_key = API_key
self._cache = cache
self._API_root_URL = ROOT_API_URL % \
(self.API_SUBSCRIPTION_SUBDOMAINS[subscription_type],)
def _lookup_cache_or_invoke_API(self, cache, API_full_url, timeout):
cached = cache.get(API_full_url)
if cached:
return cached
else:
try:
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
response = urlopen(API_full_url, None, timeout)
except HTTPError as e:
if '401' in str(e):
raise unauthorized_error.UnauthorizedError('Invalid API key')
if '404' in str(e):
raise not_found_error.NotFoundError('The resource was not found')
if '502' in str(e):
raise api_call_error.BadGatewayError(str(e), e)
except URLError as e:
raise api_call_error.APICallError(str(e), e)
else:
data = response.read().decode('utf-8')
cache.set(API_full_url, data)
return data
def call_API(self, API_endpoint_URL, params_dict,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""
Invokes a specific OWM web API endpoint URL, returning raw JSON data.
:param API_endpoint_URL: the API endpoint to be invoked
:type API_endpoint_URL: str
:param params_dict: a dictionary containing the query parameters to be
used in the HTTP request (given as key-value couples in the dict)
:type params_dict: dict
:param timeout: how many seconds to wait for connection establishment
(defaults to ``socket._GLOBAL_DEFAULT_TIMEOUT``)
:type timeout: int
:returns: a string containing raw JSON data
:raises: *APICallError*
"""
url = self._build_full_URL(API_endpoint_URL, params_dict)
return self._lookup_cache_or_invoke_API(self._cache, url, timeout)
def _build_full_URL(self, API_endpoint_URL, params_dict):
"""
Adds the API key and the query parameters dictionary to the specified
API endpoint URL, returning a complete HTTP request URL.
:param API_endpoint_URL: the API endpoint base URL
:type API_endpoint_URL: str
:param params_dict: a dictionary containing the query parameters to be
used in the HTTP request (given as key-value couples in the dict)
:type params_dict: dict
:param API_key: the OWM web API key
:type API_key: str
:returns: a full string HTTP request URL
"""
        url = self._API_root_URL + API_endpoint_URL
params = params_dict.copy()
if self._API_key is not None:
params['APPID'] = self._API_key
return self._build_query_parameters(url, params)
def _build_query_parameters(self, base_URL, params_dict):
"""
Turns dictionary items into query parameters and adds them to the base
URL
:param base_URL: the base URL whom the query parameters must be added
to
:type base_URL: str
:param params_dict: a dictionary containing the query parameters to be
used in the HTTP request (given as key-value couples in the dict)
:type params_dict: dict
:returns: a full string HTTP request URL
"""
return base_URL + '?' + urlencode(params_dict)
def __repr__(self):
return "<%s.%s - cache=%s>" % \
(__name__, self.__class__.__name__, repr(self._cache))
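# Usage sketch (hypothetical values; assumes an OWMCache instance named `cache`):
#   client = WeatherHttpClient('your-API-key', cache, subscription_type='free')
#   raw_json = client.call_API('weather', {'q': 'London,uk'})
# The endpoint name and query parameters above are illustrative; real request
# URLs are assembled from pyowm.webapi25.configuration25.ROOT_API_URL.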
| mpvoss/RickAndMortyWeatherTweets | env/lib/python3.5/site-packages/pyowm/commons/weather_client.py | Python | mit | 4,935 |
import unittest
import itertools
class TestWorld(object):
def __init__(self, **kw):
self.__dict__.update(kw)
self.components = self
self.entities = set()
self.new_entity_id = itertools.count().__next__
self.new_entity_id() # skip id 0
for comp in list(kw.values()):
comp.world = self
class TestComponent(dict):
def __init__(self):
self.entities = set()
def set(self, entity):
data = TestData()
self[entity] = data
self.entities.add(entity)
return data
def remove(self, entity):
del self[entity]
class TestData(object):
attr = 'deadbeef'
def __init__(self, **kw):
self.__dict__.update(kw)
class EntityTestCase(unittest.TestCase):
def test_repr(self):
from grease import Entity
entity = Entity(TestWorld())
self.assertTrue(repr(entity).startswith(
'<Entity id: %s of TestWorld' % entity.entity_id),
('<Entity id: %s of TestWorld' % entity.entity_id, repr(entity)))
def test_accessor_getattr_for_nonexistant_component(self):
from grease import Entity
comp = TestComponent()
world = TestWorld(test=comp)
entity = Entity(world)
self.assertTrue(entity not in comp)
self.assertRaises(AttributeError, getattr, entity, 'foo')
def test_accessor_getattr_for_non_member_entity(self):
from grease import Entity
comp = TestComponent()
world = TestWorld(test=comp)
entity = Entity(world)
accessor = entity.test
self.assertFalse(entity in comp)
self.assertRaises(AttributeError, getattr, accessor, 'attr')
def test_accessor_getattr_for_member_entity(self):
from grease import Entity
comp = TestComponent()
world = TestWorld(test=comp)
entity = Entity(world)
comp.set(entity)
self.assertTrue(entity in comp)
self.assertEqual(entity.test.attr, 'deadbeef')
def test_accessor_setattr_adds_non_member_entity(self):
from grease import Entity
comp = TestComponent()
world = TestWorld(test=comp)
entity = Entity(world)
self.assertFalse(entity in comp)
entity.test.attr = 'foobar'
self.assertEqual(entity.test.attr, 'foobar')
self.assertTrue(entity in comp)
def test_accessor_setattr_for_member_entity(self):
from grease import Entity
comp = TestComponent()
world = TestWorld(test=comp)
entity = Entity(world)
comp.set(entity)
self.assertNotEqual(entity.test.attr, 'spam')
entity.test.attr = 'spam'
self.assertTrue(entity in comp)
self.assertEqual(entity.test.attr, 'spam')
def test_eq(self):
from grease import Entity
world = TestWorld()
e1 = Entity(world)
e2 = Entity(world)
self.assertNotEqual(e1, e2)
e2.entity_id = e1.entity_id
self.assertEqual(e1, e2)
otherworld = TestWorld()
e3 = Entity(otherworld)
self.assertNotEqual(e1, e3)
self.assertNotEqual(e2, e3)
e3.entity_id = e1.entity_id
self.assertNotEqual(e1, e3)
self.assertNotEqual(e2, e3)
def test_delattr(self):
from grease import Entity
comp = TestComponent()
world = TestWorld(test=comp)
entity = Entity(world)
comp.set(entity)
self.assertTrue(entity in comp)
del entity.test
self.assertFalse(entity in comp)
def test_entity_id(self):
from grease import Entity
world = TestWorld()
entity1 = Entity(world)
entity2 = Entity(world)
self.assertTrue(entity1.entity_id > 0)
self.assertTrue(entity2.entity_id > 0)
self.assertNotEqual(entity1.entity_id, entity2.entity_id)
def test_delete_exists(self):
from grease import Entity
world = TestWorld()
self.assertEqual(world.entities, set())
entity1 = Entity(world)
entity2 = Entity(world)
self.assertEqual(world.entities, set([entity1, entity2]))
self.assertTrue(entity1.exists)
self.assertTrue(entity2.exists)
entity1.delete()
self.assertEqual(world.entities, set([entity2]))
self.assertFalse(entity1.exists)
self.assertTrue(entity2.exists)
entity2.delete()
self.assertEqual(world.entities, set())
self.assertFalse(entity1.exists)
self.assertFalse(entity2.exists)
def test_entity_subclass_slots(self):
from grease import Entity
class NewEntity(Entity):
pass
world = TestWorld()
entity = NewEntity(world)
self.assertRaises(AttributeError, setattr, entity, 'notanattr', 1234)
def test_entity_subclass_cant_have_slots(self):
from grease import Entity
self.assertRaises(TypeError,
type, 'Test', (Entity,), {'__slots__': ('foo', 'bar')})
def test_entity_subclass_init(self):
from grease import Entity
stuff = []
class TestEntity(Entity):
def __init__(self, world, other):
stuff.append(world)
stuff.append(other)
world = TestWorld()
TestEntity(world, self)
self.assertEqual(stuff, [world, self])
class EntityComponentAccessorTestCase(unittest.TestCase):
def test_getattr(self):
from grease.entity import EntityComponentAccessor
from grease import Entity
world = TestWorld()
entity = Entity(world)
component = {entity: TestData(foo=5)}
accessor = EntityComponentAccessor(component, entity)
self.assertEqual(accessor.foo, 5)
self.assertRaises(AttributeError, getattr, accessor, 'bar')
entity2 = Entity(world)
accessor = EntityComponentAccessor(component, entity2)
self.assertRaises(AttributeError, getattr, accessor, 'foo')
self.assertRaises(AttributeError, getattr, accessor, 'bar')
def test_setattr_member_entity(self):
from grease.entity import EntityComponentAccessor
from grease import Entity
world = TestWorld()
entity = Entity(world)
data = TestData(foo=5)
accessor = EntityComponentAccessor({entity: data}, entity)
self.assertEqual(data.foo, 5)
accessor.foo = 66
self.assertEqual(data.foo, 66)
accessor.bar = '!!'
self.assertEqual(data.bar, '!!')
def test_setattr_nonmember_entity(self):
from grease.entity import EntityComponentAccessor
from grease import Entity
world = TestWorld()
entity = Entity(world)
component = TestComponent()
accessor = EntityComponentAccessor(component, entity)
self.assertRaises(AttributeError, getattr, entity, 'baz')
self.assertTrue(entity not in component)
accessor.baz = 1000
self.assertTrue(entity in component)
self.assertEqual(accessor.baz, 1000)
self.assertEqual(component[entity].baz, 1000)
def test_truthiness(self):
from grease.entity import EntityComponentAccessor
from grease import Entity
world = TestWorld()
entity = Entity(world)
component = TestComponent()
accessor = EntityComponentAccessor(component, entity)
self.assertFalse(accessor)
component[entity] = 456
self.assertTrue(accessor)
if __name__ == '__main__':
unittest.main()
| caseman/grease | test/entity_test.py | Python | mit | 6,470 |
from django.db import models
from django.contrib.sites.models import Site
# Create your models here.
class Link(models.Model):
url = models.URLField(max_length=512)
site = models.ForeignKey(Site, on_delete=models.SET_NULL, null=True)
request_times = models.PositiveIntegerField(default=0)
updated = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{}-{}'.format(self.pk, self.url)
class RateLimit(models.Model):
ip = models.GenericIPAddressField(unique=True)
start_time = models.DateTimeField()
count = models.PositiveIntegerField(default=0)
def __str__(self):
return self.ip
| typefj/django-miniurl | shortener/models.py | Python | mit | 702 |
class InvalidValueState(ValueError):
pass
| szopu/datadiffs | datadiffs/exceptions.py | Python | mit | 46 |
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
import numpy as np
import sys
import scipy
from scipy import stats
data_file = sys.argv[1]
data = np.loadtxt(data_file)
slope, intercept, r_value, p_value, std_err = stats.linregress(data[499:2499,0], data[499:2499,1])
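# Einstein relation for 3D diffusion: MSD(t) = 6*D*t, so D = slope/6.
# Unit conversion: 1 Angstrom^2/ps = 1e-20 m^2 / 1e-12 s = 1e-8 m^2 s^-1.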
nf = open('linear_reg.dat', 'w')
nf.write("Linear Regression for data between %5d ps (frame: 499) and %5d ps (frame 2499) \n" %(data[499][0], data[2499][0]))
nf.write("slope: %10.5E Angstrom^2 ps^-1 \n" %(slope))
nf.write("intercept: %10.5E Angstrom^2\n" %(intercept))
nf.write("R^2: %10.5f \n" %(r_value**2))
nf.write('Diffusion coeff: %10.5E Angstrom^2 ps^-1$ \n' %(slope/6.0))
nf.write('Diffusion coeff: %10.5E m^2 s^-1$ \n' %(slope*10**(-8)/6.0))
nf.close()
| rbdavid/MolecDynamics | Analysis/MSD/slope.py | Python | mit | 754 |
#!/usr/bin/env python3
import sys
import os
import urllib.request
import path_utils
# credit: https://stackoverflow.com/questions/22676/how-to-download-a-file-over-http
def download_url(source_url, target_path):
if os.path.exists(target_path):
return False, "Target path [%s] already exists" % target_path
contents = None
try:
with urllib.request.urlopen(source_url) as f:
contents = f.read().decode("utf8")
except urllib.error.HTTPError as httpex:
return False, "Downloading failed: [%s]" % httpex
with open(target_path, "w") as f:
f.write(contents)
return True, None
def puaq():
print("Usage: %s source_url target_path" % path_utils.basename_filtered(__file__))
sys.exit(1)
if __name__ == "__main__":
if len(sys.argv) < 3:
puaq()
source_url = sys.argv[1]
target_path = sys.argv[2]
v, r = download_url(source_url, target_path)
if not v:
print(r)
sys.exit(1)
| mvendra/mvtools | download_url.py | Python | mit | 992 |
# Plot histogram
import os
import numpy as np
from plantcv.plantcv.threshold import binary as binary_threshold
from plantcv.plantcv import params
from plantcv.plantcv import fatal_error
from plantcv.plantcv._debug import _debug
import pandas as pd
from plotnine import ggplot, aes, geom_line, labels, scale_color_manual
def _hist_gray(gray_img, bins, lower_bound, upper_bound, mask=None):
""" Prepare the ready to plot histogram data
Inputs:
gray_img = grayscale image to analyze
bins = divide the data into n evenly spaced bins
lower_bound = the lower bound of the bins (x-axis min value)
upper_bound = the upper bound of the bins (x-axis max value)
mask = binary mask, calculate histogram from masked area only (default=None)
Returns:
bin_labels = an array of histogram bin labels
hist_percent = an array of histogram represented by percent values
hist_gray_data = an array of histogram (original values)
:param gray_img: numpy.ndarray
:param bins: int
:param lower_bound: int
:param upper_bound: int
:param mask: numpy.ndarray
:return bin_labels: numpy.ndarray
:return hist_percent: numpy.ndarray
:return hist_gray_data: numpy.ndarray
"""
params.device += 1
debug = params.debug
# Apply mask if one is supplied
if mask is not None:
min_val = np.min(gray_img)
pixels = len(np.where(mask > 0)[0])
# apply plant shaped mask to image
params.debug = None
mask1 = binary_threshold(mask, 0, 255, 'light')
mask1 = (mask1 / 255)
masked = np.where(mask1 != 0, gray_img, min_val - 5000)
else:
pixels = gray_img.shape[0] * gray_img.shape[1]
masked = gray_img
params.debug = debug
# Store histogram data
hist_gray_data, hist_bins = np.histogram(masked, bins, (lower_bound, upper_bound))
# make hist percentage for plotting
hist_percent = (hist_gray_data / float(pixels)) * 100
# use middle value of every bin as bin label
bin_labels = np.array([np.average([hist_bins[i], hist_bins[i+1]]) for i in range(0, len(hist_bins) - 1)])
return bin_labels, hist_percent, hist_gray_data
# hist_data = pd.DataFrame({'pixel intensity': bin_labels, 'proportion of pixels (%)': hist_percent})
# return hist_data
def histogram(img, mask=None, bins=100, lower_bound=None, upper_bound=None, title=None, hist_data=False):
"""Plot histograms of each input image channel
Inputs:
img = an RGB or grayscale image to analyze
mask = binary mask, calculate histogram from masked area only (default=None)
bins = divide the data into n evenly spaced bins (default=100)
lower_bound = the lower bound of the bins (x-axis min value) (default=None)
upper_bound = the upper bound of the bins (x-axis max value) (default=None)
title = a custom title for the plot (default=None)
hist_data = return the frequency distribution data if True (default=False)
Returns:
fig_hist = histogram figure
hist_df = dataframe with histogram data, with columns "pixel intensity" and "proportion of pixels (%)"
:param img: numpy.ndarray
:param mask: numpy.ndarray
:param bins: int
:param lower_bound: int
:param upper_bound: int
:param title: str
:param hist_data: bool
:return fig_hist: plotnine.ggplot.ggplot
:return hist_df: pandas.core.frame.DataFrame
"""
if not isinstance(img, np.ndarray):
fatal_error("Only image of type numpy.ndarray is supported input!")
if len(img.shape) < 2:
fatal_error("Input image should be at least a 2d array!")
if mask is not None:
masked = img[np.where(mask > 0)]
img_min, img_max = np.nanmin(masked), np.nanmax(masked)
else:
img_min, img_max = np.nanmin(img), np.nanmax(img)
# for lower / upper bound, if given, use the given value, otherwise, use the min / max of the image
lower_bound = lower_bound if lower_bound is not None else img_min
upper_bound = upper_bound if upper_bound is not None else img_max
if len(img.shape) > 2:
if img.shape[2] == 3:
b_names = ['blue', 'green', 'red']
else:
b_names = [str(i) for i in range(img.shape[2])]
if len(img.shape) == 2:
bin_labels, hist_percent, hist_ = _hist_gray(img, bins=bins, lower_bound=lower_bound, upper_bound=upper_bound,
mask=mask)
hist_df = pd.DataFrame(
{'pixel intensity': bin_labels, 'proportion of pixels (%)': hist_percent, 'hist_count': hist_,
'color channel': ['0' for _ in range(len(hist_percent))]})
else:
# Assumption: RGB image
# Initialize dataframe column arrays
px_int = np.array([])
prop = np.array([])
hist_count = np.array([])
channel = []
for (b, b_name) in enumerate(b_names):
bin_labels, hist_percent, hist_ = _hist_gray(img[:, :, b], bins=bins, lower_bound=lower_bound,
upper_bound=upper_bound, mask=mask)
# Append histogram data for each channel
px_int = np.append(px_int, bin_labels)
prop = np.append(prop, hist_percent)
hist_count = np.append(hist_count, hist_)
channel = channel + [b_name for _ in range(len(hist_percent))]
# Create dataframe
hist_df = pd.DataFrame(
{'pixel intensity': px_int, 'proportion of pixels (%)': prop, 'hist_count': hist_count,
'color channel': channel})
fig_hist = (ggplot(data=hist_df,
mapping=aes(x='pixel intensity', y='proportion of pixels (%)', color='color channel'))
+ geom_line())
if title is not None:
fig_hist = fig_hist + labels.ggtitle(title)
if len(img.shape) > 2 and img.shape[2] == 3:
fig_hist = fig_hist + scale_color_manual(['blue', 'green', 'red'])
# Plot or print the histogram
_debug(visual=fig_hist, filename=os.path.join(params.debug_outdir, str(params.device) + '_hist.png'))
if hist_data is True:
return fig_hist, hist_df
return fig_hist
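# Usage sketch (assumes `img` is a grayscale or BGR numpy array and `mask` a
# binary mask produced by an earlier plantcv step):
#   fig, df = histogram(img, mask=mask, bins=256, hist_data=True)
#   # `fig` is a plotnine figure; `df` holds the 'pixel intensity' and
#   # 'proportion of pixels (%)' columns per color channel.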
| stiphyMT/plantcv | plantcv/plantcv/visualize/histogram.py | Python | mit | 6,304 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basicviz', '0002_auto_20160717_1939'),
]
operations = [
migrations.AlterField(
model_name='document',
name='name',
field=models.CharField(unique=True, max_length=32),
),
]
| sdrogers/ms2ldaviz | ms2ldaviz/basicviz/migrations/0003_auto_20160717_1943.py | Python | mit | 416 |
# -*- coding: utf-8 -*-
# pylint: disable=not-context-manager,useless-object-inheritance
# NOTE: The pylint not-content-manager warning is disabled pending the fix of
# a bug in pylint https://github.com/PyCQA/pylint/issues/782
# NOTE: useless-object-inheritance needed for Python 2.x compatibility
"""This module contains the classes underlying SoCo's caching system."""
from __future__ import unicode_literals
import threading
from time import time
from . import config
from .compat import dumps
class _BaseCache(object):
"""An abstract base class for the cache."""
# pylint: disable=no-self-use, unused-argument
def __init__(self, *args, **kwargs):
super().__init__()
self._cache = {}
#: `bool`: whether the cache is enabled
self.enabled = True
def put(self, item, *args, **kwargs):
"""Put an item into the cache."""
raise NotImplementedError
def get(self, *args, **kwargs):
"""Get an item from the cache."""
raise NotImplementedError
def delete(self, *args, **kwargs):
"""Delete an item from the cache."""
raise NotImplementedError
def clear(self):
"""Empty the whole cache."""
raise NotImplementedError
class NullCache(_BaseCache):
"""A cache which does nothing.
Useful for debugging.
"""
def put(self, item, *args, **kwargs):
"""Put an item into the cache."""
def get(self, *args, **kwargs):
"""Get an item from the cache."""
return None
def delete(self, *args, **kwargs):
"""Delete an item from the cache."""
def clear(self):
"""Empty the whole cache."""
class TimedCache(_BaseCache):
"""A simple thread-safe cache for caching method return values.
The cache key is generated by from the given ``*args`` and ``**kwargs``.
Items are expired from the cache after a given period of time.
Example:
>>> from time import sleep
>>> cache = TimedCache()
>>> cache.put("item", 'some', kw='args', timeout=3)
>>> # Fetch the item again, by providing the same args and kwargs.
>>> assert cache.get('some', kw='args') == "item"
>>> # Providing different args or kwargs will not return the item.
>>> assert not cache.get('some', 'otherargs') == "item"
>>> # Waiting for less than the provided timeout does not cause the
>>> # item to expire.
>>> sleep(2)
>>> assert cache.get('some', kw='args') == "item"
>>> # But waiting for longer does.
>>> sleep(2)
>>> assert not cache.get('some', kw='args') == "item"
Warning:
At present, the cache can theoretically grow and grow, since entries
are not automatically purged, though in practice this is unlikely
since there are not that many different combinations of arguments in
the places where it is used in SoCo, so not that many different
cache entries will be created. If this becomes a problem,
use a thread and timer to purge the cache, or rewrite this to use
LRU logic!
"""
def __init__(self, default_timeout=0):
"""
Args:
default_timeout (int): The default number of seconds after
which items will be expired.
"""
super().__init__()
#: `int`: The default caching expiry interval in seconds.
self.default_timeout = default_timeout
# A thread lock for the cache
self._cache_lock = threading.Lock()
def get(self, *args, **kwargs):
"""Get an item from the cache for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments.
Returns:
object: The object which has been found in the cache, or `None` if
no unexpired item is found. This means that there is no point
storing an item in the cache if it is `None`.
"""
if not self.enabled:
return None
# Look in the cache to see if there is an unexpired item. If there is
# we can just return the cached result.
cache_key = self.make_key(args, kwargs)
# Lock and load
with self._cache_lock:
if cache_key in self._cache:
expirytime, item = self._cache[cache_key]
if expirytime >= time():
return item
else:
# An expired item is present - delete it
del self._cache[cache_key]
# Nothing found
return None
def put(self, item, *args, **kwargs):
"""Put an item into the cache, for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments. If ``timeout`` is specified as one
of the keyword arguments, the item will remain available
for retrieval for ``timeout`` seconds. If ``timeout`` is
`None` or not specified, the ``default_timeout`` for this
cache will be used. Specify a ``timeout`` of 0 (or ensure that
the ``default_timeout`` for this cache is 0) if this item is
not to be cached.
"""
if not self.enabled:
return
# Check for a timeout keyword, store and remove it.
timeout = kwargs.pop("timeout", None)
if timeout is None:
timeout = self.default_timeout
cache_key = self.make_key(args, kwargs)
# Store the item, along with the time at which it will expire
with self._cache_lock:
self._cache[cache_key] = (time() + timeout, item)
def delete(self, *args, **kwargs):
"""Delete an item from the cache for this combination of args and
kwargs."""
cache_key = self.make_key(args, kwargs)
with self._cache_lock:
try:
del self._cache[cache_key]
except KeyError:
pass
def clear(self):
"""Empty the whole cache."""
with self._cache_lock:
self._cache.clear()
@staticmethod
def make_key(*args, **kwargs):
"""Generate a unique, hashable, representation of the args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments.
Returns:
str: the key.
"""
# This is not entirely straightforward, since args and kwargs may
# contain mutable items and unicode. Possibilities include using
# __repr__, frozensets, and code from Py3's LRU cache. But pickle
# works, and although it is not as fast as some methods, it is good
# enough at the moment
cache_key = dumps((args, kwargs))
return cache_key
class Cache(NullCache):
"""A factory class which returns an instance of a cache subclass.
A `TimedCache` is returned, unless `config.CACHE_ENABLED` is `False`,
in which case a `NullCache` will be returned.
"""
def __new__(cls, *args, **kwargs):
if config.CACHE_ENABLED:
new_cls = TimedCache
else:
new_cls = NullCache
instance = super(Cache, cls).__new__(new_cls)
instance.__init__(*args, **kwargs)
return instance
| KennethNielsen/SoCo | soco/cache.py | Python | mit | 7,367 |
from logika import IGRALEC_R, IGRALEC_Y, PRAZNO, NEODLOCENO, NI_KONEC, MAKSIMALNO_STEVILO_POTEZ, nasprotnik
from five_logika import Five_logika
from powerup_logika import Powerup_logika, POWER_STOLPEC, POWER_ZETON, POWER_2X_NW, POWER_2X_W
from pop10_logika import Pop10_logika
from pop_logika import Pop_logika
import random
#######################
## MINIMAX ALGORITHM ##
#######################
class AlphaBeta:
# Algoritem alphabeta
def __init__(self, globina):
self.globina = globina # Kako globoko iščemo?
self.prekinitev = False # Želimo algoritem prekiniti?
self.igra = None # Objekt, ki predstavlja igro
self.jaz = None # Katerega igralca igramo?
self.poteza = None # Sem vpišemo potezo, ko jo najdemo
def prekini(self):
        '''Method called by the GUI when thinking has to stop because the
        user closed the window or started a new game.'''
self.prekinitev = True
def izracunaj_potezo(self, igra):
        '''Compute a move for the current state of the given game.'''
# To metodo pokličemo iz vzporednega vlakna
self.igra = igra
self.jaz = self.igra.na_potezi
self.prekinitev = False # Glavno vlakno bo to nastavilo na True, če bomo morali prekiniti
self.poteza = None # Sem napišemo potezo, ko jo najdemo
# Poženemo alphabeta
(poteza, vrednost) = self.alphabeta(self.globina, -AlphaBeta.NESKONCNO, AlphaBeta.NESKONCNO, True)
self.jaz = None
self.igra = None
if not self.prekinitev:
# Nismo bili prekinjeni, torej potezo izvedemo
self.poteza = poteza
def uredi_poteze(self, poteze):
        '''Return an ordered list of moves, which is then used in alphabeta.'''
urejene_poteze = [] # Urejen seznam potez
if isinstance(self.igra, Five_logika):
# Imamo 5 v vrsto
zeljen_vrstni_red = [1,4,7] # Željen vrstni red, če so na voljo vse poteze
zeljen_vrstni_red = random.sample(zeljen_vrstni_red, 3)
for i in range(1,3):
dodajamo = [4-i,4+i] # Poteze, ki jih želimo dodati
dodajamo = random.sample(dodajamo, 2)
for j in dodajamo:
zeljen_vrstni_red.append(j)
elif isinstance(self.igra, Powerup_logika):
# Imamo Power Up igro
# Dodajmo dvojne poteze brez možnosti zmage
# Najprej dodamo te, ker če bi takšne z možnostjo zmage,
# bi jih (lahek) algoritem že na začetku porabil
zeljen_vrstni_red = [74]
for i in range(1,4):
zeljen_vrstni_red += random.sample([74+i, 74-i], 2)
# Dodajmo dvojne poteze z možno zmago
zeljen_vrstni_red.append(84)
for i in range(1,4):
zeljen_vrstni_red += random.sample([84+i, 84-i], 2)
# Dodajmo 'navadne' poteze
zeljen_vrstni_red.append(4)
for i in range(1,4):
zeljen_vrstni_red += random.sample([4+i, 4-i], 2)
# Dodajmo poteze, ki poteptajo stolpec pod sabo
zeljen_vrstni_red.append(14)
for i in range(1,4):
zeljen_vrstni_red += random.sample([14+i, 14-i], 2)
# Dodajmo poteze, ki odstranijo nasprotnikov žeton
zeljen_vrstni_red += random.sample([24+7*i for i in range(6)], 6)
for i in range(1,4):
dodajamo = [24+i+7*j for j in range(6)] + [24-i+7*j for j in range(6)]
zeljen_vrstni_red += random.sample(dodajamo, 12)
elif isinstance(self.igra, Pop10_logika):
# Imamo Pop 10 igro
if self.igra.faza == 1:
# Smo v fazi odstranjevanja žetonov
zeljen_vrstni_red = random.sample([18, 68, 25, 75], 4) # Središčni dve polji
dodajamo = [10, 11, 12, 17, 19, 24, 26, 31, 32, 33]
dodajamo += [50+i for i in dodajamo]
zeljen_vrstni_red += random.sample(dodajamo, len(dodajamo))
dodajamo = [i for i in range(2, 7)] + [i for i in range(37, 42)] + [9+7*i for i in range(4)] + [13+7*i for i in range(4)]
dodajamo += [50+i for i in dodajamo]
zeljen_vrstni_red += random.sample(dodajamo, len(dodajamo))
dodajamo = [1+7*i for i in range(6)] + [7+7*i for i in range(6)]
dodajamo += [50+i for i in dodajamo]
zeljen_vrstni_red += random.sample(dodajamo, len(dodajamo))
else:
# Smo v fazi dodajanja žetonov (lahko faza 0 ali 2)
zeljen_vrstni_red = [4]
for i in range(1,4):
zeljen_vrstni_red += random.sample([4+i, 4-i], 2)
else:
# Imamo 4 v vrsto ali Pop Out
zeljen_vrstni_red = [4,-4] # Željen vrstni red, če so na voljo vse poteze
for i in range(1,4):
dodajamo = [4-i,-4+i,4+i,-4-i] # Poteze, ki jih želimo dodati
dodajamo = random.sample(dodajamo, 4)
for j in dodajamo:
zeljen_vrstni_red.append(j)
for i in zeljen_vrstni_red:
if i in poteze:
# Poteza je na voljo, treba jo je dodati
urejene_poteze.append(i)
else:
# Poteza ni na voljo
continue
return urejene_poteze
# Vrednosti igre
ZMAGA = 10**5
NESKONCNO = ZMAGA + 1 # Več kot zmaga
def vrednost_pozicije(self):
        '''Return an estimate of the value of the position.'''
vrednost = 0
if self.igra is None:
# Če bi se slučajno zgodilo, da ne bi bila izbrana nobena igra
return vrednost
elif self.igra.na_potezi is None:
# Igre je konec
# Sem ne bi smeli nikoli priti zaradi if stavkov v alphabeta
return vrednost
else:
delez = 0.8 # Faktor za katerega mu je izguba manj vredna kot dobiček
tocke = [0, 0] # Sem bomo shranili število točk igralcev [R,Y]
# Najprej preverimo kateri tip igre imamo
if isinstance(self.igra, Five_logika):
# Imamo 5 v vrsto, torej imamo zmagovalne štirke (robne)
# ter petke, pokličimo jih spodaj
stirke_R = self.igra.stirke_R
stirke_Y = self.igra.stirke_Y
petke = self.igra.petke
# Pojdimo skozi vse štirke & petke ter jih primerno ovrednotimo
# Štirke / petke, ki vsebujejo žetone obeh igralcev so vredne 0 točk
# Prazne petke so vredne 0.1 točke
# Štirke so vredne 0.2 + a/5 točke, kjer je a število žetonov v štirki,
# če je igralec pravilne barve za to štirko.
# Petke so vredne a/5 točke, kjer je a število žetonov v petki.
for s in stirke_R: # Štirke na voljo rdečemu
((i1,j1),(i2,j2),(i3,j3),(i4,j4)) = s
stirka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4]]
if IGRALEC_Y in stirka:
continue
else:
tocke[0] += 0.2 + stirka.count(IGRALEC_R) / 5
for s in stirke_Y: # Štirke na voljo rumenemu
((i1,j1),(i2,j2),(i3,j3),(i4,j4)) = s
stirka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4]]
if IGRALEC_R in stirka:
continue
else:
tocke[1] += 0.2 + stirka.count(IGRALEC_Y) / 5
for p in petke:
((i1,j1),(i2,j2),(i3,j3),(i4,j4),(i5,j5)) = p
petka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4],
self.igra.polozaj[i5][j5]]
barve = list(set(stirka))
if len(barve) == 2:
if PRAZNO in barve:
# V petki so žetoni samo 1 barve
b = list(set(barve) - set([PRAZNO]))[0]
if b == IGRALEC_R:
tocke[0] += petka.count(b) / 5
else:
tocke[1] += petka.count(b) / 5
else:
# V petki so rdeči in rumeni
continue
elif barve == [PRAZNO]:
# Petka je prazna
tocke[0] += 0.1
tocke[1] += 0.1
else:
# V petki so rumeni in rdeči žetoni
continue
elif isinstance(self.igra, Pop10_logika):
# Naš cilj tukaj je, da bi imeli čim več štirk in še pomembneje,
# da bi izločili čim več žetonov
vrednost_tocke = AlphaBeta.ZMAGA / 30 # Da ne bomo nikoli imeli > ZMAGA brez da smo zmagali. To je vbistvu vrednost zmagovalne štirke.
for s in self.igra.stirke:
((i1,j1),(i2,j2),(i3,j3),(i4,j4)) = s
stirka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4]]
tocke[0] += stirka.count(IGRALEC_R) / 4 / (10-self.igra.odstranjeni[0])
tocke[1] += stirka.count(IGRALEC_Y) / 4 / (10-self.igra.odstranjeni[1])
vrednost_razlike_ods = (self.igra.odstranjeni[0] - self.igra.odstranjeni[1]) * 3 # Vrednost razlike odstranjenih
if self.jaz == IGRALEC_R:
vrednost += (tocke[0] - delez*tocke[1] + vrednost_razlike_ods) * vrednost_tocke
elif self.jaz == IGRALEC_Y:
vrednost += (tocke[1] - delez*tocke[0] - vrednost_razlike_ods) * vrednost_tocke
vrednost *= 0.984**(max(self.igra.stevilo_potez - 42, 0)) / 10
return vrednost
else:
# Imamo normalno, popout ali powerup igro
# Pojdimo sedaj skozi vse možne zmagovalne štirke in jih
# primerno ovrednotimo
# Stirke, ki ze vsebujejo zetone obeh igralec so vredne 0 tock
# Prazne stirke so vredne 0.1 tocke
# Ostale so vredne a/4 tock, kjer je a stevilo zetonov znotraj stirke
for s in self.igra.stirke:
((i1,j1),(i2,j2),(i3,j3),(i4,j4)) = s
stirka = [self.igra.polozaj[i1][j1], self.igra.polozaj[i2][j2],
self.igra.polozaj[i3][j3], self.igra.polozaj[i4][j4]]
barve = list(set(stirka))
# barve bo dolžine 2 ali 3, če bi bilo dolžine 1,
# bi bilo igre že konec
if len(barve) == 2:
if PRAZNO in barve:
# V štirki so žetoni samo 1 barve
b = list(set(barve) - set([PRAZNO]))[0]
if b == IGRALEC_R:
tocke[0] += stirka.count(b) / 4
else:
tocke[1] += stirka.count(b) / 4
else:
continue
elif barve == [PRAZNO]:
# Štirka je prazna
tocke[0] += 0.1
tocke[1] += 0.1
else:
# V štirki so rumene in rdeče
continue
if self.jaz == IGRALEC_R:
vrednost += (tocke[0] - delez*tocke[1]) / 69 * 0.1 * AlphaBeta.ZMAGA
else:
vrednost += (tocke[1] - delez*tocke[0]) / 69 * 0.1 * AlphaBeta.ZMAGA
if isinstance(self.igra, Pop_logika):
k = 0.984**self.igra.stevilo_potez
elif isinstance(self.igra, Powerup_logika):
k = 1 - self.igra.stevilo_potez / (2*58)
else:
k = 1 - self.igra.stevilo_potez / (2*6*7)
vrednost *= k
return vrednost
def alphabeta(self, globina, alpha, beta, maksimiziramo):
        '''Main AlphaBeta method.
        Returns the winning move and its value if it finds one, otherwise (None, 0).'''
if self.prekinitev:
# Sporočili so nam, da moramo prekiniti
return (None, 0)
(zmagovalec, stirka) = self.igra.stanje_igre()
if zmagovalec in (IGRALEC_R, IGRALEC_Y, NEODLOCENO):
if isinstance(self.igra, Pop10_logika):
k = 0.984**(max(self.igra.stevilo_potez - 42, 0))
elif isinstance(self.igra, Pop_logika):
k = 0.984**self.igra.stevilo_potez
elif isinstance(self.igra, Powerup_logika):
k = 1 - self.igra.stevilo_potez / (2*58) # Kjer je 58 max število potez v tej igri
else:
k = 1 - self.igra.stevilo_potez / (2*6*7)
# Igre je konec, vrnemo njeno vrednost
if zmagovalec == self.jaz:
return (None, AlphaBeta.ZMAGA * k)
elif zmagovalec == nasprotnik(self.jaz):
return (None, -AlphaBeta.ZMAGA * k)
else:
return (None, 0)
elif zmagovalec == NI_KONEC:
# Igre ni konec
if globina == 0:
return (None, self.vrednost_pozicije())
else:
# Naredimo en korak alphabeta metode
if maksimiziramo:
# Maksimiziramo
najboljsa_poteza = None
for p in self.uredi_poteze(self.igra.veljavne_poteze()):
self.igra.povleci_potezo(p, True)
if (p > 70 and isinstance(self.igra, Powerup_logika)) or (isinstance(self.igra, Pop10_logika) and self.igra.faza == 2):
# Imamo dvojno potezo
for p2 in self.uredi_poteze(self.igra.veljavne_poteze()):
self.igra.povleci_potezo(p2, True)
vrednost = self.alphabeta(max(globina-2, 0), alpha, beta, not maksimiziramo)[1]
self.igra.razveljavi()
if vrednost > alpha:
najboljsa_poteza = [p, p2]
alpha = vrednost
if najboljsa_poteza is None:
najboljsa_poteza = [p, p2]
if beta <= alpha:
break
self.igra.razveljavi()
if beta <= alpha:
break
else:
vrednost = self.alphabeta(globina-1, alpha, beta, not maksimiziramo)[1]
self.igra.razveljavi()
if vrednost > alpha:
najboljsa_poteza = p
alpha = vrednost
if najboljsa_poteza is None:
najboljsa_poteza = p
if beta <= alpha:
break
else:
# Minimiziramo
najboljsa_poteza = None
for p in self.uredi_poteze(self.igra.veljavne_poteze()):
self.igra.povleci_potezo(p, True)
if (p > 70 and isinstance(self.igra, Powerup_logika)) or (isinstance(self.igra, Pop10_logika) and self.igra.faza == 2):
# Imamo dvojno potezo
for p2 in self.uredi_poteze(self.igra.veljavne_poteze()):
self.igra.povleci_potezo(p2, True)
vrednost = self.alphabeta(max(globina-2, 0), alpha, beta, not maksimiziramo)[1]
self.igra.razveljavi()
if vrednost < beta:
najboljsa_poteza = [p, p2]
beta = vrednost
if najboljsa_poteza is None:
najboljsa_poteza = [p, p2]
if beta <= alpha:
break
self.igra.razveljavi()
if beta <= alpha:
break
else:
vrednost = self.alphabeta(globina-1, alpha, beta, not maksimiziramo)[1]
self.igra.razveljavi()
if vrednost < beta:
najboljsa_poteza = p
beta = vrednost
if najboljsa_poteza is None:
najboljsa_poteza = p
if beta <= alpha:
break
assert (najboljsa_poteza is not None), 'alphabeta: izračunana poteza je None, veljavne_poteze={0}, globina={1}'.format(self.igra.veljavne_poteze(), globina)
return (najboljsa_poteza, alpha if maksimiziramo else beta)
else:
assert False, 'alphabeta: nedefinirano stanje igre'
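# Usage sketch (hypothetical driver; in the project the GUI runs
# izracunaj_potezo in a worker thread and reads .poteza once it returns):
#   ai = AlphaBeta(globina=4)
#   ai.izracunaj_potezo(igra)  # `igra` is one of the imported game-logic objects
#   poteza = ai.poteza         # None only if the search was interrupted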
| SamoFMF/stiri_v_vrsto | alphabeta.py | Python | mit | 17,907 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0005
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = 30000
__C.TRAIN.CACHE_PATH = None
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# Scale to use during training (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# __C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
# __C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfies both the positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, e.g., with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before applying NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode; the default is 'nms', while 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top'; specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Whether to append max-pooling after crop_and_resize.
# If true, the region is resized to a square of 2xPOOLING_SIZE and
# 2x2 max-pooling is applied; otherwise the region is directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during fine-tuning; by default the first of the 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
# Whether to tune the batch normalization parameters during training
__C.RESNET.BN_TRAIN = False
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1. / 16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Name of (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
# Default pooling mode; only 'crop' is available
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8,16,32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5,1,2]
def get_output_dir(imdb, weights_filename):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
"""Return the directory where tensorflow summaries are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
def cfg_from_file(filename):
  """Load a config file and merge it into the default options."""
  import yaml
  with open(filename, 'r') as f:
    # safe_load restricts the YAML to plain data types, which is all a config file needs
    yaml_cfg = edict(yaml.safe_load(f))
  _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
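# ----------------------------------------------------------------------------
# Hedged usage sketch (comments only, not part of the original file). It
# assumes the module's usual `cfg = __C` alias and uses illustrative file and
# key names; adjust the paths for your own experiment scripts.
#
#   from model.config import cfg, cfg_from_file, cfg_from_list
#
#   cfg_from_file('experiments/cfgs/res101.yml')   # merge a YAML override file
#   cfg_from_list(['TRAIN.SCALES', '(800,)',
#                  'TEST.NMS', '0.35'])            # key/value pairs, e.g. from argparse
#   print(cfg.TRAIN.SCALES, cfg.TEST.NMS)
#
# Unknown keys and mismatched value types are rejected by the checks in
# _merge_a_into_b and cfg_from_list above.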
| junranhe/tf-faster-rcnn | lib/model/config.py | Python | mit | 11,161 |
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__="ete2-2.2rev1056"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'open_newick.ui'
#
# Created: Tue Jan 10 15:56:56 2012
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_OpenNewick(object):
def setupUi(self, OpenNewick):
OpenNewick.setObjectName("OpenNewick")
OpenNewick.resize(569, 353)
self.comboBox = QtGui.QComboBox(OpenNewick)
self.comboBox.setGeometry(QtCore.QRect(460, 300, 81, 23))
self.comboBox.setObjectName("comboBox")
self.widget = QtGui.QWidget(OpenNewick)
self.widget.setGeometry(QtCore.QRect(30, 10, 371, 321))
self.widget.setObjectName("widget")
self.retranslateUi(OpenNewick)
QtCore.QMetaObject.connectSlotsByName(OpenNewick)
def retranslateUi(self, OpenNewick):
OpenNewick.setWindowTitle(QtGui.QApplication.translate("OpenNewick", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
| csc8630Spring2014/Clusterizer | ete2/treeview/_open_newick.py | Python | mit | 2,493 |
from collections import Counter
from os.path import splitext
import matplotlib.pyplot as plt
from arcapix.fs.gpfs import ListProcessingRule, ManagementPolicy
def type_sizes(file_list):
c = Counter()
for f in file_list:
        c.update({splitext(f.name)[1]: f.filesize})  # group sizes by file extension
return c
p = ManagementPolicy()
r = p.rules.new(ListProcessingRule, 'types', type_sizes)
result = p.run('mmfs1')['types']
plt.pie(list(result.values()), labels=list(result.keys()), autopct='%1.1f%%')
plt.axis('equal')
plt.show()
| arcapix/gpfsapi-examples | type_sizes_piechart.py | Python | mit | 519 |
# _*_ encoding: utf-8 _*_
import timeit
def insertion_sort(nums):
"""Insertion Sort."""
for index in range(1, len(nums)):
val = nums[index]
left_index = index - 1
while left_index >= 0 and nums[left_index] > val:
nums[left_index + 1] = nums[left_index]
left_index -= 1
nums[left_index + 1] = val
return nums
def insertion_sort_tuples(nums):
"""Insertion Sort."""
for index in range(1, len(nums)):
val = nums[index]
left_index = index - 1
while left_index >= 0 and nums[left_index][1] > val[1]:
nums[left_index + 1] = nums[left_index]
left_index -= 1
nums[left_index + 1] = val
return nums
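# A small worked illustration (comments only): each pass takes the next value
# and shifts larger neighbours one slot to the right until the value fits.
#
#   insertion_sort([5, 2, 4, 1])                  -> [1, 2, 4, 5]
#   insertion_sort_tuples([('b', 3), ('a', 1)])   -> [('a', 1), ('b', 3)]
#
# Both functions sort the list in place and return the same list object.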
if __name__ == '__main__':
print("""
The insertion sort algorithm sorts each item sequentially and compares its value
to its neighbor, working its way to the end of the list and moving smaller items to the left.
Here are the best and worst case scenarios:
Input (Worst Case Scenario):
lst_one = [x for x in range(0, 2000)]
lst_one.reverse()
""")
lst_one = [x for x in range(0, 2000)]
lst_one.reverse()
time1 = timeit.timeit('insertion_sort(lst_one)', setup="from __main__ import insertion_sort, lst_one",number=500)
print("""
Number of runs = 500
Average Time = {}
Input (Best Case Scenario):
lst_two = [x for x in range(0, 2000)]
""".format(time1))
lst_two = [x for x in range(0, 2000)]
time2 = timeit.timeit('insertion_sort(lst_two)', setup="from __main__ import insertion_sort, lst_two",number=500)
print("""
Number of runs = 500
Average Time = {}
""".format(time2))
| palindromed/data-structures2 | src/insertion_sort.py | Python | mit | 1,651 |
from django.db import models
class Citizen(models.Model):
"""
The insurance users.
"""
name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
# Contact information
email = models.EmailField()
phone = models.CharField(max_length=50)
# Citizen documents
CC = 'CC'
PASSPORT = 'PP'
document_choices = (
(CC, 'cc'),
(PASSPORT, 'Passport')
)
document_type = models.CharField(max_length=5, choices=document_choices)
document_number = models.BigIntegerField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| jaconsta/soat_cnpx | api/citizens/models.py | Python | mit | 672 |
import datetime
import typing
from . import helpers
from .tl import types, custom
Phone = str
Username = str
PeerID = int
Entity = typing.Union[types.User, types.Chat, types.Channel]
FullEntity = typing.Union[types.UserFull, types.messages.ChatFull, types.ChatFull, types.ChannelFull]
EntityLike = typing.Union[
Phone,
Username,
PeerID,
types.TypePeer,
types.TypeInputPeer,
Entity,
FullEntity
]
EntitiesLike = typing.Union[EntityLike, typing.Sequence[EntityLike]]
ButtonLike = typing.Union[types.TypeKeyboardButton, custom.Button]
MarkupLike = typing.Union[
types.TypeReplyMarkup,
ButtonLike,
typing.Sequence[ButtonLike],
typing.Sequence[typing.Sequence[ButtonLike]]
]
TotalList = helpers.TotalList
DateLike = typing.Optional[typing.Union[float, datetime.datetime, datetime.date, datetime.timedelta]]
LocalPath = str
ExternalUrl = str
BotFileID = str
FileLike = typing.Union[
LocalPath,
ExternalUrl,
BotFileID,
bytes,
typing.BinaryIO,
types.TypeMessageMedia,
types.TypeInputFile,
types.TypeInputFileLocation
]
# Can't use `typing.Type` in Python 3.5.2
# See https://github.com/python/typing/issues/266
try:
OutFileLike = typing.Union[
str,
typing.Type[bytes],
typing.BinaryIO
]
except TypeError:
OutFileLike = typing.Union[
str,
typing.BinaryIO
]
MessageLike = typing.Union[str, types.Message]
MessageIDLike = typing.Union[int, types.Message, types.TypeInputMessage]
ProgressCallback = typing.Callable[[int, int], None]
| expectocode/Telethon | telethon/hints.py | Python | mit | 1,562 |
# coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
__author__ = 'yfauser'
from tests.config import *
from nsxramlclient.client import NsxClient
import time
client_session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)
def test_segment_pools():
### Test Segment ID Pool Operations
# Get all configured Segment Pools
get_segment_resp = client_session.read('vdnSegmentPools')
client_session.view_response(get_segment_resp)
# Add a Segment Pool
segments_create_body = client_session.extract_resource_body_example('vdnSegmentPools', 'create')
client_session.view_body_dict(segments_create_body)
segments_create_body['segmentRange']['begin'] = '11002'
segments_create_body['segmentRange']['end'] = '11003'
segments_create_body['segmentRange']['name'] = 'legacy'
create_response = client_session.create('vdnSegmentPools', request_body_dict=segments_create_body)
client_session.view_response(create_response)
time.sleep(5)
# Update the new Segment Pool:
update_segment_body = client_session.extract_resource_body_example('vdnSegmentPool', 'update')
update_segment_body['segmentRange']['name'] = 'PythonTest'
update_segment_body['segmentRange']['end'] = '11005'
client_session.update('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']},
request_body_dict=update_segment_body)
time.sleep(5)
# Display a specific Segment pool (the new one)
specific_segement_resp = client_session.read('vdnSegmentPool', uri_parameters={'segmentPoolId':
create_response['objectId']})
client_session.view_response(specific_segement_resp)
time.sleep(5)
# Delete new Segment Pool
client_session.delete('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']})
def test_mcast_pools():
### Test Multicast Pool Operations
# Add a multicast Pool
mcastpool_create_body = client_session.extract_resource_body_example('vdnMulticastPools', 'create')
client_session.view_body_dict(mcastpool_create_body)
mcastpool_create_body['multicastRange']['desc'] = 'Test'
mcastpool_create_body['multicastRange']['begin'] = '235.0.0.0'
mcastpool_create_body['multicastRange']['end'] = '235.1.1.1'
mcastpool_create_body['multicastRange']['name'] = 'legacy'
create_response = client_session.create('vdnMulticastPools', request_body_dict=mcastpool_create_body)
client_session.view_response(create_response)
# Get all configured Multicast Pools
get_mcast_pools = client_session.read('vdnMulticastPools')
client_session.view_response(get_mcast_pools)
time.sleep(5)
# Update the newly created mcast pool
mcastpool_update_body = client_session.extract_resource_body_example('vdnMulticastPool', 'update')
mcastpool_update_body['multicastRange']['end'] = '235.3.1.1'
mcastpool_update_body['multicastRange']['name'] = 'Python'
update_response = client_session.update('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
create_response['objectId']},
request_body_dict=mcastpool_update_body)
client_session.view_response(update_response)
# display a specific Multicast Pool
get_mcast_pool = client_session.read('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
create_response['objectId']})
client_session.view_response(get_mcast_pool)
# Delete new mcast pool
client_session.delete('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId': create_response['objectId']})
#test_segment_pools()
#test_mcast_pools()
| vmware/nsxramlclient | tests/vdnConfig.py | Python | mit | 5,003 |
import numpy as np
import jarvis.helpers.helpers as helpers
from data_cleaner import DataCleaner
def get_data(csv=None, sep='|'):
dataset = create_dataset(csv, sep)
inputs = DataCleaner().clean(dataset[:, 0:1])
outputs = format_targets(dataset[:, 1])
train_data, test_data = inputs[::2], inputs[1::2]
train_targets, test_targets = outputs[::2], outputs[1::2]
return [(train_data, train_targets), (test_data, test_targets)]
def create_dataset(csv, sep):
if csv:
return helpers.read_csv(csv, sep=sep).values
else:
data = []
for f in helpers.csvs():
for row in helpers.read_csv(f, sep=sep).values:
data.append(list(row))
return np.array(data)
def format_targets(target_list):
target_map = {}
index = 0
actions = helpers.get_actions()
# Map targets to their index inside of actions array
for action in actions:
target_map[action] = index
index += 1
return map(lambda target: target_map[target], target_list)
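# Hedged illustration (comments only, hypothetical action names): if
# helpers.get_actions() returned ['greet', 'weather', 'music'], then
# format_targets(['music', 'greet']) would map 'music' -> 2 and 'greet' -> 0,
# i.e. each label is replaced by its index in the actions list.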
| whittlbc/jarvis | jarvis/learn/classify/data_prepper.py | Python | mit | 960 |
from jaspyx.visitor import BaseVisitor
class Return(BaseVisitor):
def visit_Return(self, node):
self.indent()
if node.value is not None:
self.output('return ')
self.visit(node.value)
else:
self.output('return')
self.finish()
| iksteen/jaspyx | jaspyx/visitor/return_.py | Python | mit | 299 |
import os
import os.path
from raiden.constants import RAIDEN_DB_VERSION
def database_from_privatekey(base_dir, app_number):
""" Format a database path based on the private key and app number. """
dbpath = os.path.join(base_dir, f"app{app_number}", f"v{RAIDEN_DB_VERSION}_log.db")
os.makedirs(os.path.dirname(dbpath))
return dbpath
| hackaugusto/raiden | raiden/tests/utils/app.py | Python | mit | 351 |
from django.apps import AppConfig
class ProxyConfig(AppConfig):
name = 'geoq.proxy'
verbose_name = 'GeoQ Proxy' | ngageoint/geoq | geoq/proxy/apps.py | Python | mit | 120 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = '1.8.0'
REQUIRED_PACKAGES = [
'absl-py >= 0.1.6',
'astor >= 0.6.0',
'gast >= 0.2.0',
'numpy >= 1.13.3',
'six >= 1.10.0',
'protobuf >= 3.4.0',
'tensorboard >= 1.8.0, < 1.9.0',
'termcolor >= 1.1.0',
]
if sys.byteorder == 'little':
# grpcio does not build correctly on big-endian machines due to lack of
# BoringSSL support.
# See https://github.com/tensorflow/tensorflow/issues/17882.
REQUIRED_PACKAGES.append('grpcio >= 1.8.6')
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.8.0a0, < 1.9.0a0'
break
# weakref.finalize and enum were introduced in Python 3.4
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
REQUIRED_PACKAGES.append('enum34 >= 1.1.6')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main',
'toco_from_protos = tensorflow.contrib.lite.toco.python.toco_from_protos:main',
'toco = tensorflow.contrib.lite.toco.python.toco_wrapper:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:run_main',
]
# pylint: enable=line-too-long
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib,
'tensorflow', 'include')
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
# Copy eigen code into tensorflow/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
if 'external/eigen_archive/' in install_dir:
extra_dir = install_dir.replace('external/eigen_archive', '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for path, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
)
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) +
list(find_files('*', 'external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description='TensorFlow helps the tensors flow',
long_description='',
url='https://www.tensorflow.org/',
author='Google Inc.',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',)
| ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/tools/pip_package/setup.py | Python | mit | 8,836 |
'''
Created on Jun 16, 2014
@author: lwoydziak
'''
import pexpect
import sys
from dynamic_machine.cli_commands import assertResultNotEquals, Command
class SshCli(object):
LOGGED_IN = 0
def __init__(self, host, loginUser, debug = False, trace = False, log=None, port=22, pexpectObject=None):
self.pexpect = pexpect if not pexpectObject else pexpectObject
self.debug = debug
self.trace = trace
self.host = host
self._port = port
self._connection = None
self.modeList = []
self._log = log
self._bufferedCommands = None
self._bufferedMode = None
self._loginUser = loginUser
self._resetExpect()
def __del__(self):
self.closeCliConnectionTo()
def showOutputOnScreen(self):
self.debug = True
self.trace = True
self._log = None
self._setupLog()
def connectWithSsh(self):
self._debugLog("Establishing connection to " + self.host)
self._connection = self.pexpect.spawn(
'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s@%s -p %d' %
(self._loginUser.username, self.host, self._port))
if self._connection is None:
raise Exception("Unable to connect via SSH perhaps wrong IP!")
self._secure = True
self._setupLog()
self._loginUser.commandLine(self)
self.modeList = [self._loginUser]
def resetLoggingTo(self, log):
self._connection.logfile = log
def _setupLog(self):
if self.trace:
class Python3BytesToStdOut:
def write(self, s):
sys.stdout.buffer.write(s)
def flush(self):
sys.stdout.flush()
self._connection.logfile = Python3BytesToStdOut()
if self._log is not None:
self._connection.logfile = self._log
def loginSsh(self):
self._setupLog()
self._debugLog("Login in as "+self._loginUser.username)
try:
self._loginUser.sendPassword()
return True
except Exception as e:
self.forceCloseCliConnectionTo()
raise Exception('Exception ('+str(e)+') '+'Expected CLI response: "Password:"' + "\n Got: \n" + self._lastExpect())
def _exit_modes_beyond(self, thisMode):
if not self.modeList: return
while len(self.modeList) > thisMode + 1:
self.modeList.pop().exit()
def exitMode(self, mode):
if mode in self.modeList:
self.modeList.remove(mode)
def check_prereq(self, prereqMode = 0):
self._exit_modes_beyond(prereqMode)
if len(self.modeList) <= prereqMode:
raise Exception("Attempted to enter menu when prerequist mode was not entered, expected: %d" % prereqMode)
def execute_as(self, user):
self.check_prereq(self.LOGGED_IN)
self._exit_modes_beyond(self.LOGGED_IN)
user.commandLine(self)
user.login()
self.modeList.append(user)
return user
def closeCliConnectionTo(self):
if self._connection == None:
return
self._exit_modes_beyond(-1)
self.modeList = []
self._debugLog("Exited all modes.")
self.forceCloseCliConnectionTo()
def forceCloseCliConnectionTo(self):
self.modeList = None
if self._connection:
self._debugLog("Closing connection.")
self._connection.close()
self._connection = None
def _debugLog(self, message):
if self.debug:
print(message)
def _resetExpect(self):
self.previousExpectLine = ""
if self._connection is not None and isinstance(self._connection.buffer, str):
self.previousExpectLine = self._connection.buffer
self._connection.buffer = ""
def _lastExpect(self):
constructLine = self.previousExpectLine
if self._connection is not None and isinstance(self._connection.before, str):
constructLine += self._connection.before
if self._connection is not None and isinstance(self._connection.after, str):
constructLine += self._connection.after
return constructLine
def send(self, command):
if self._bufferedCommands is None:
self._bufferedCommands = command
else:
self._bufferedCommands += "\n" + command
if self._bufferedMode is None:
self.flush()
else:
self._debugLog("Buffering command " + command)
def flush(self):
if self._bufferedCommands is None:
return
self._connection.sendline(str(self._bufferedCommands))
self._bufferedCommands = None
def buffering(self):
return self._bufferedMode
def bufferedMode(self, mode = True):
if mode is None:
self.flush()
self._bufferedMode = mode
def compareReceivedAgainst(self, pattern, timeout=-1, searchwindowsize=None, indexOfSuccessfulResult=0):
if self._bufferedMode is None:
index = self._connection.expect(pattern, timeout, searchwindowsize)
self._debugLog("\nLooking for " + str(pattern) + " Found ("+str(index)+")")
self._debugLog(self._lastExpect())
return index
else:
return indexOfSuccessfulResult | Pipe-s/dynamic_machine | dynamic_machine/cli_ssh.py | Python | mit | 5,503 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: test.py
#
# Copyright 2018 Costas Tyfoxylos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import logging
import json
import os
from bootstrap import bootstrap
from library import execute_command, tempdir
# This is the main prefix used for logging
LOGGER_BASENAME = '''_CI.test'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())
def get_arguments():
parser = argparse.ArgumentParser(description='Accepts stages for testing')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--lint', help='Test the lint stage of the template', action='store_true')
group.add_argument('--test', help='Test the test stage of the template', action='store_true')
group.add_argument('--build', help='Test the build stage of the template', action='store_true')
group.add_argument('--document', help='Test the document stage of the template', action='store_true')
args = parser.parse_args()
return args
def _test(stage):
from cookiecutter.main import cookiecutter
template = os.path.abspath('.')
context = os.path.abspath('cookiecutter.json')
with tempdir():
cookiecutter(template,
extra_context=json.loads(open(context).read()),
no_input=True)
os.chdir(os.listdir('.')[0])
del os.environ['PIPENV_PIPFILE']
return execute_command(os.path.join('_CI', 'scripts', f'{stage}.py'))
def test(stage):
emojize = bootstrap()
exit_code = _test(stage)
success = not exit_code
if success:
LOGGER.info('%s Tested stage "%s" successfully! %s',
emojize(':white_heavy_check_mark:'),
stage,
emojize(':thumbs_up:'))
else:
LOGGER.error('%s Errors found testing stage "%s"! %s',
emojize(':cross_mark:'),
stage,
emojize(':crying_face:'))
raise SystemExit(exit_code)
if __name__ == '__main__':
args = get_arguments()
stage = next((argument for argument in ('lint', 'test', 'build', 'document')
if getattr(args, argument)), None)
test(stage)
| costastf/python_library_cookiecutter | _CI/scripts/test.py | Python | mit | 3,282 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gramfuzz.fields import *
import names
TOP_CAT = "postal"
# Adapted from https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form
# The name rules have been modified and placed into names.py
class PDef(Def):
cat = "postal_def"
class PRef(Ref):
cat = "postal_def"
EOL = "\n"
# this will be the top-most rule
Def("postal_address",
PRef("name-part"), PRef("street-address"), PRef("zip-part"),
cat="postal")
# these will be the grammar rules that should not be randomly generated
# as a top-level rule
PDef("name-part",
Ref("name", cat=names.TOP_CAT), EOL
)
PDef("street-address",
PRef("house-num"), PRef("street-name"), Opt(PRef("apt-num")), EOL,
sep=" ")
PDef("house-num", UInt)
PDef("street-name", Or(
"Sesame Street", "Yellow Brick Road", "Jump Street", "Evergreen Terrace",
"Elm Street", "Baker Street", "Paper Street", "Wisteria Lane",
"Coronation Street", "Rainey Street", "Spooner Street",
"0day Causeway", "Diagon Alley",
))
PDef("zip-part",
PRef("town-name"), ", ", PRef("state-code"), " ", PRef("zip-code"), EOL
)
PDef("apt-num",
UInt(min=0, max=10000), Opt(String(charset=String.charset_alpha_upper, min=1, max=2))
)
PDef("town-name", Or(
"Seoul", "São Paulo", "Bombay", "Jakarta", "Karachi", "Moscow",
"Istanbul", "Mexico City", "Shanghai", "Tokyo", "New York", "Bangkok",
"Beijing", "Delhi", "London", "HongKong", "Cairo", "Tehran", "Bogota",
"Bandung", "Tianjin", "Lima", "Rio de Janeiro" "Lahore", "Bogor",
"Santiago", "St Petersburg", "Shenyang", "Calcutta", "Wuhan", "Sydney",
"Guangzhou", "Singapore", "Madras", "Baghdad", "Pusan", "Los Angeles",
"Yokohama", "Dhaka", "Berlin", "Alexandria", "Bangalore", "Malang",
"Hyderabad", "Chongqing", "Ho Chi Minh City",
))
PDef("state-code", Or(
"AL", "AK", "AS", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL", "GA",
"GU", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MH",
"MA", "MI", "FM", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM",
"NY", "NC", "ND", "MP", "OH", "OK", "OR", "PW", "PA", "PR", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "VI", "WA", "WV", "WI", "WY",
))
PDef("zip-code",
String(charset="123456789",min=1,max=2), String(charset="0123456789",min=4,max=5),
Opt("-", String(charset="0123456789",min=4,max=5))
)
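# ----------------------------------------------------------------------------
# Hedged usage sketch (comments only, not part of the original grammar file).
# Based on the gramfuzz README, a fuzzer loads this module as a grammar file
# and generates from the top-level category declared above; treat the exact
# API below as an assumption if your gramfuzz version differs.
#
#   import gramfuzz
#
#   fuzzer = gramfuzz.GramFuzzer()
#   fuzzer.load_grammar("examples/grams/postal.py")   # this file
#   for address in fuzzer.gen(cat="postal", num=5):
#       print(address)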
| d0c-s4vage/gramfuzz | examples/grams/postal.py | Python | mit | 2,381 |
#!/usr/bin/env python3
"""Combine logs from multiple bitcore nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| LIMXTEC/BitCore | test/functional/combine_logs.py | Python | mit | 4,611 |
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework_swagger.views import get_swagger_view
from . import views, views_api
# Django REST framework
router = DefaultRouter()
router.register(r'election', views_api.ElectionInterface)
router.register(r'district', views_api.DistrictInterface)
router.register(r'municipality', views_api.MunicipalityInterface)
router.register(r'party', views_api.PartyInterface)
router.register(r'polling_station', views_api.PollingStationInterface)
router.register(r'list', views_api.ListInterface)
router.register(r'result', views_api.PollingStationResultInterface)
router.register(r'regional_electoral_district', views_api.RegionalElectoralDistrictInterface)
# Django OpenAPI Swagger
schema_view = get_swagger_view(title='Offene Wahlen API')
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^loaderio-eac9628bcae9be5601e1f3c62594d162.txt$', views.load_test, name='load_test'),
url(r'^api/', include(router.urls)),
url(r'^api/docs$', schema_view)
]
| OKFNat/offenewahlen-nrw17 | src/austria/urls.py | Python | mit | 1,055 |
'''
Created on Jan 15, 2014
@author: Jose Borreguero
'''
from setuptools import setup
setup(
name = 'dsfinterp',
packages = ['dsfinterp','dsfinterp/test' ],
version = '0.1',
description = 'Cubic Spline Interpolation of Dynamics Structure Factors',
long_description = open('README.md').read(),
author = 'Jose Borreguero',
author_email = '[email protected]',
url = 'https://github.com/camm-sns/dsfinterp',
download_url = 'http://pypi.python.org/pypi/dsfinterp',
keywords = ['AMBER', 'mdend', 'energy', 'molecular dynamics'],
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Physics',
],
)
| camm/dsfinterp | setup.py | Python | mit | 880 |
# -*- coding: utf-8 -*-
from django.core.mail import EmailMultiAlternatives
from django.template import Context, Template
from django.template.loader import get_template
from helpers import ClientRouter, MailAssetsHelper, strip_accents
class UserMail:
"""
This class is responsible for firing emails for Users and Nonprofits
"""
from_email = 'Atados <[email protected]>'
def __init__(self, user):
self.whole_user = user # This is the Nonprofit or Volunteer object
self.user = user.user if not type(user).__name__=='User' else user # This is the User object
self.global_context = {
"assets": {
"check": "https://s3.amazonaws.com/atados-us/images/check.png",
"iconFacebook": "https://s3.amazonaws.com/atados-us/images/icon-fb.png",
"iconInstagram": "https://s3.amazonaws.com/atados-us/images/icon-insta.png",
"logoAtadosSmall": "https://s3.amazonaws.com/atados-us/images/logo.small.png",
"logoAtadosSmall2": "https://s3.amazonaws.com/atados-us/images/mandala.png"
}
}
def sendEmail(self, template_name, subject, context, user_email=None):
text_content = get_template('email/{}.txt'.format(template_name)).render(context)
html_content = get_template('email/{}.html'.format(template_name)).render(context)
msg = EmailMultiAlternatives(subject, text_content, self.from_email, [user_email if user_email else self.user.email])
msg.attach_alternative(text_content, "text/plain")
msg.attach_alternative(html_content, "text/html")
return msg.send() > 0
def make_context(self, data):
context_data = self.global_context.copy()
context_data.update(data)
return Context(context_data)
def sendSignupConfirmation(self, site, token):
return self.sendEmail('emailVerification', 'Confirme seu email do Atados.', self.make_context({ 'token': token , 'site': site}))
class VolunteerMail(UserMail):
"""
This class contains all emails sent to volunteers
"""
def sendSignup(self):
"""
Email A/B from ruler
Sent when volunteer completes registration
"""
return self.sendEmail('volunteerSignup', 'Eba! Seu cadastro foi feito com sucesso', self.make_context({}))
def sendFacebookSignup(self): # pass by now
"""
Sent when volunteer completes registration from Facebook
"""
return self.sendEmail('volunteerFacebookSignup', 'Seja bem vindo ao Atados! \o/', self.make_context({}))
def sendAppliesToProject(self, project):
"""
Email for ruler C
Sent when volunteer applies to project
"""
return self.sendEmail('volunteerAppliesToProject', u'Você se inscreveu em uma vaga :)', self.make_context({'project': project}))
def askActInteractionConfirmation(self, project, volunteer):
"""
Email for ruler D
Sent when volunteer applies to project
"""
confirm_url = ClientRouter.mail_routine_monitoring_build_form_url(True, volunteer.user.email, project.nonprofit.name, "")
refute_url = ClientRouter.mail_routine_monitoring_build_form_url(False, volunteer.user.email, project.nonprofit.name, "")
return self.sendEmail('askActInteractionConfirmation', u'Acompanhamento de Rotina:)',
self.make_context({
'project': project,
'confirm_url': confirm_url,
'refute_url': refute_url
})
)
def sendAskAboutProjectExperience(self, apply):
"""
"""
subject = u"Como foi sua experiência com a Atados!"
feedback_form_url = ClientRouter.mail_ask_about_project_experience_url('volunteer', apply)
return self.sendEmail('volunteerAskAboutProjectExperience', subject, self.make_context({
'project_name': apply.project.name,
'feedback_form_url': feedback_form_url,
}), apply.volunteer.user.email)
#+ def sendAfterApply4Weeks(self): # new ruler
#+ """
#+ """
#+ context = Context({'user': self.user.name})
#+ return self.sendEmail('volunteerAfterApply4Weeks', '~ ~ ~ ~ ~', context)
#+ def send3DaysBeforePontual(self): # new ruler
#+ """
#+ """
#+ context = Context({'user': self.user.name})
#+ return self.sendEmail('volunteer3DaysBeforePontual', '~ ~ ~ ~ ~', context)
class NonprofitMail(UserMail):
"""
This class contains all emails sent to nonprofits
"""
def sendSignup(self):
"""
Email 1 from ruler
"""
return self.sendEmail('nonprofitSignup', 'Recebemos seu cadastro :)', self.make_context({
'review_profile_url': ClientRouter.edit_nonprofit_url(self.user.slug)
}))
def sendApproved(self):
"""
Email 2 from ruler
"""
return self.sendEmail('nonprofitApproved', 'Agora você tem um perfil no Atados', self.make_context({
'new_act_url': ClientRouter.new_act_url()
}))
def sendProjectPostingSuccessful(self, project):
"""
Email *NEW*
"""
return self.sendEmail('projectPostingSuccessful', 'Vaga criada com sucesso!', self.make_context({
'project': project,
'edit_project_url': ClientRouter.edit_project_url(project.slug)
}))
def sendProjectApproved(self, project):
"""
Email 3 from ruler
"""
return self.sendEmail('projectApproved', 'Publicamos a sua vaga de voluntariado', self.make_context({
'project': project,
'act_url': ClientRouter.view_act_url(project.slug)
}))
def sendGetsNotifiedAboutApply(self, apply, message):
"""
Email 4 from ruler
"""
try:
subject = u'Novo voluntário para o {}'.format(apply.project.name)
except UnicodeEncodeError:
subject = u'Novo voluntário para o {}'.format(strip_accents(apply.project.name))
return self.sendEmail('nonprofitGetsNotifiedAboutApply', subject, self.make_context({
'apply': apply,
'volunteer_message': message,
'answer_volunteer_url': ClientRouter.view_volunteer_url(apply.volunteer.user.slug)
}), apply.project.email)
def sendAskAboutProjectExperience(self, project):
"""
"""
subject = u"Nos conta como foi sua experiência com a Atados!"
act_url = ClientRouter.edit_project_url(project.slug)
feedback_form_url = ClientRouter.mail_ask_about_project_experience_url('nonprofit', project)
return self.sendEmail('nonprofitAskAboutProjectExperience', subject, self.make_context({
'project_name': project.name,
'feedback_form_url': feedback_form_url,
'act_url': act_url,
}), project.email)
#+ def send1MonthInactive(self):
#+ """
#+ """
#+ return self.sendEmail('nonprofit1MonthInactive', '~ ~ ~ ~ ~', self.make_context({
#+ 'name': self.user.name
#+ }))
#+ def sendPontual(self):
#+ """
#+ """
#+ return self.sendEmail('nonprofitPontual', '~ ~ ~ ~ ~', self.make_context({
#+ 'name': self.user.name
#+ }))
#+ def sendRecorrente(self):
#+ """
#+ """
#+ return self.sendEmail('nonprofitRecorrente', '~ ~ ~ ~ ~', self.make_context({
#+ 'name': self.user.name
#+ }))
| atados/api | atados_core/emails.py | Python | mit | 7,061 |
# -*- coding: utf-8 -*-
"""
flask.ext.babelex
~~~~~~~~~~~~~~~~~
Implements i18n/l10n support for Flask applications based on Babel.
:copyright: (c) 2013 by Serge S. Koval, Armin Ronacher and contributors.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
# this is a workaround for a snow leopard bug that babel does not
# work around :)
if os.environ.get('LC_CTYPE', '').lower() == 'utf-8':
os.environ['LC_CTYPE'] = 'en_US.utf-8'
from datetime import datetime
from flask import _request_ctx_stack
from babel import dates, numbers, support, Locale
from babel.support import NullTranslations
from werkzeug import ImmutableDict
try:
from pytz.gae import pytz
except ImportError:
from pytz import timezone, UTC
else:
timezone = pytz.timezone
UTC = pytz.UTC
from flask_babelex._compat import string_types
_DEFAULT_LOCALE = Locale.parse('en')
class Babel(object):
"""Central controller class that can be used to configure how
Flask-Babel behaves. Each application that wants to use Flask-Babel
has to create, or run :meth:`init_app` on, an instance of this class
after the configuration was initialized.
"""
default_date_formats = ImmutableDict({
'time': 'medium',
'date': 'medium',
'datetime': 'medium',
'time.short': None,
'time.medium': None,
'time.full': None,
'time.long': None,
'date.short': None,
'date.medium': None,
'date.full': None,
'date.long': None,
'datetime.short': None,
'datetime.medium': None,
'datetime.full': None,
'datetime.long': None,
})
def __init__(self, app=None, default_locale='en', default_timezone='UTC',
date_formats=None, configure_jinja=True, default_domain=None):
self._default_locale = default_locale
self._default_timezone = default_timezone
self._date_formats = date_formats
self._configure_jinja = configure_jinja
self.app = app
self._locale_cache = dict()
if default_domain is None:
self._default_domain = Domain()
else:
self._default_domain = default_domain
self.locale_selector_func = None
self.timezone_selector_func = None
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Set up this instance for use with *app*, if no app was passed to
the constructor.
"""
self.app = app
app.babel_instance = self
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['babel'] = self
app.config.setdefault('BABEL_DEFAULT_LOCALE', self._default_locale)
app.config.setdefault('BABEL_DEFAULT_TIMEZONE', self._default_timezone)
if self._date_formats is None:
self._date_formats = self.default_date_formats.copy()
#: a mapping of Babel datetime format strings that can be modified
#: to change the defaults. If you invoke :func:`format_datetime`
#: and do not provide any format string Flask-Babel will do the
#: following things:
#:
#: 1. look up ``date_formats['datetime']``. By default ``'medium'``
#: is returned to enforce medium length datetime formats.
#: 2. ``date_formats['datetime.medium'] (if ``'medium'`` was
#: returned in step one) is looked up. If the return value
#: is anything but `None` this is used as new format string.
#: otherwise the default for that language is used.
self.date_formats = self._date_formats
if self._configure_jinja:
app.jinja_env.filters.update(
datetimeformat=format_datetime,
dateformat=format_date,
timeformat=format_time,
timedeltaformat=format_timedelta,
numberformat=format_number,
decimalformat=format_decimal,
currencyformat=format_currency,
percentformat=format_percent,
scientificformat=format_scientific,
)
app.jinja_env.add_extension('jinja2.ext.i18n')
app.jinja_env.install_gettext_callables(
lambda x: get_domain().get_translations().ugettext(x),
lambda s, p, n: get_domain().get_translations().ungettext(s, p, n),
newstyle=True
)
def localeselector(self, f):
"""Registers a callback function for locale selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the locale falls back to the one from
the configuration.
        This has to return the locale as string (e.g.: ``'de_AT'``, ``'en_US'``)
"""
assert self.locale_selector_func is None, \
'a localeselector function is already registered'
self.locale_selector_func = f
return f
def timezoneselector(self, f):
"""Registers a callback function for timezone selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the timezone falls back to the one from
the configuration.
This has to return the timezone as string (eg: ``'Europe/Vienna'``)
"""
assert self.timezone_selector_func is None, \
'a timezoneselector function is already registered'
self.timezone_selector_func = f
return f
def list_translations(self):
"""Returns a list of all the locales translations exist for. The
list returned will be filled with actual locale objects and not just
strings.
.. versionadded:: 0.6
"""
dirname = os.path.join(self.app.root_path, 'translations')
if not os.path.isdir(dirname):
return []
result = []
for folder in os.listdir(dirname):
locale_dir = os.path.join(dirname, folder, 'LC_MESSAGES')
if not os.path.isdir(locale_dir):
continue
            if any(x.endswith('.mo') for x in os.listdir(locale_dir)):
result.append(Locale.parse(folder))
if not result:
result.append(Locale.parse(self._default_locale))
return result
@property
def default_locale(self):
"""The default locale from the configuration as instance of a
`babel.Locale` object.
"""
return self.load_locale(self.app.config['BABEL_DEFAULT_LOCALE'])
@property
def default_timezone(self):
"""The default timezone from the configuration as instance of a
`pytz.timezone` object.
"""
return timezone(self.app.config['BABEL_DEFAULT_TIMEZONE'])
def load_locale(self, locale):
"""Load locale by name and cache it. Returns instance of a `babel.Locale`
object.
"""
rv = self._locale_cache.get(locale)
if rv is None:
self._locale_cache[locale] = rv = Locale.parse(locale)
return rv
def get_locale():
"""Returns the locale that should be used for this request as
`babel.Locale` object. This returns `None` if used outside of
a request. If flask-babel was not attached to the Flask application,
will return 'en' locale.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return None
locale = getattr(ctx, 'babel_locale', None)
if locale is None:
babel = ctx.app.extensions.get('babel')
if babel is None:
locale = _DEFAULT_LOCALE
else:
if babel.locale_selector_func is not None:
rv = babel.locale_selector_func()
if rv is None:
locale = babel.default_locale
else:
locale = babel.load_locale(rv)
else:
locale = babel.default_locale
ctx.babel_locale = locale
return locale
def get_timezone():
"""Returns the timezone that should be used for this request as
`pytz.timezone` object. This returns `None` if used outside of
a request. If flask-babel was not attached to application, will
return UTC timezone object.
"""
ctx = _request_ctx_stack.top
tzinfo = getattr(ctx, 'babel_tzinfo', None)
if tzinfo is None:
babel = ctx.app.extensions.get('babel')
if babel is None:
tzinfo = UTC
else:
if babel.timezone_selector_func is None:
tzinfo = babel.default_timezone
else:
rv = babel.timezone_selector_func()
if rv is None:
tzinfo = babel.default_timezone
else:
if isinstance(rv, string_types):
tzinfo = timezone(rv)
else:
tzinfo = rv
ctx.babel_tzinfo = tzinfo
return tzinfo
def refresh():
"""Refreshes the cached timezones and locale information. This can
    be used to switch the translation within a request if you want
    the changes to take place immediately, not just with the next request::
user.timezone = request.form['timezone']
user.locale = request.form['locale']
refresh()
flash(gettext('Language was changed'))
Without that refresh, the :func:`~flask.flash` function would probably
    return English text on a now-German page.
"""
ctx = _request_ctx_stack.top
for key in 'babel_locale', 'babel_tzinfo':
if hasattr(ctx, key):
delattr(ctx, key)
def _get_format(key, format):
"""A small helper for the datetime formatting functions. Looks up
format defaults for different kinds.
"""
babel = _request_ctx_stack.top.app.extensions.get('babel')
if babel is not None:
formats = babel.date_formats
else:
formats = Babel.default_date_formats
if format is None:
format = formats[key]
if format in ('short', 'medium', 'full', 'long'):
rv = formats['%s.%s' % (key, format)]
if rv is not None:
format = rv
return format
def to_user_timezone(datetime):
"""Convert a datetime object to the user's timezone. This automatically
happens on all date formatting unless rebasing is disabled. If you need
to convert a :class:`datetime.datetime` object at any time to the user's
    timezone (as returned by :func:`get_timezone`) this function can be used.
"""
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=UTC)
tzinfo = get_timezone()
return tzinfo.normalize(datetime.astimezone(tzinfo))
def to_utc(datetime):
"""Convert a datetime object to UTC and drop tzinfo. This is the
opposite operation to :func:`to_user_timezone`.
"""
if datetime.tzinfo is None:
datetime = get_timezone().localize(datetime)
return datetime.astimezone(UTC).replace(tzinfo=None)
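# Illustrative sketch (kept as comments so nothing executes at import time):
# with an active request context and a timezone selector returning e.g.
# 'Europe/Vienna' -- both assumptions here -- the two helpers above round-trip
# a naive UTC value:
#
#     naive_utc = datetime(2017, 1, 1, 12, 0)     # interpreted as UTC
#     local = to_user_timezone(naive_utc)         # aware datetime in the user's zone
#     assert to_utc(local) == naive_utc           # back to the naive UTC original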
def format_datetime(datetime=None, format=None, rebase=True):
"""Return a date formatted according to the given pattern. If no
:class:`~datetime.datetime` object is passed, the current time is
assumed. By default rebasing happens which causes the object to
    be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function formats both date and
time.
The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `datetimeformat`.
"""
format = _get_format('datetime', format)
return _date_format(dates.format_datetime, datetime, format, rebase)
def format_date(date=None, format=None, rebase=True):
"""Return a date formatted according to the given pattern. If no
:class:`~datetime.datetime` or :class:`~datetime.date` object is passed,
the current time is assumed. By default rebasing happens which causes
    the object to be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function only formats the date part
of a :class:`~datetime.datetime` object.
The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `dateformat`.
"""
if rebase and isinstance(date, datetime):
date = to_user_timezone(date)
format = _get_format('date', format)
return _date_format(dates.format_date, date, format, rebase)
def format_time(time=None, format=None, rebase=True):
"""Return a time formatted according to the given pattern. If no
:class:`~datetime.datetime` object is passed, the current time is
assumed. By default rebasing happens which causes the object to
    be converted to the user's timezone (as returned by
    :func:`to_user_timezone`). This function only formats the time part
    of a :class:`~datetime.datetime` object.
The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `timeformat`.
"""
format = _get_format('time', format)
return _date_format(dates.format_time, time, format, rebase)
def format_timedelta(datetime_or_timedelta, granularity='second'):
"""Format the elapsed time from the given date to now or the given
timedelta. This currently requires an unreleased development
version of Babel.
This function is also available in the template context as filter
named `timedeltaformat`.
"""
if isinstance(datetime_or_timedelta, datetime):
datetime_or_timedelta = datetime.utcnow() - datetime_or_timedelta
return dates.format_timedelta(datetime_or_timedelta, granularity,
locale=get_locale())
def _date_format(formatter, obj, format, rebase, **extra):
"""Internal helper that formats the date."""
locale = get_locale()
extra = {}
if formatter is not dates.format_date and rebase:
extra['tzinfo'] = get_timezone()
return formatter(obj, format, locale=locale, **extra)
def format_number(number):
"""Return the given number formatted for the locale in request
:param number: the number to format
:return: the formatted number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_number(number, locale=locale)
def format_decimal(number, format=None):
"""Return the given decimal number formatted for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_decimal(number, format=format, locale=locale)
def format_currency(number, currency, format=None):
"""Return the given number formatted for the locale in request
:param number: the number to format
:param currency: the currency code
:param format: the format to use
:return: the formatted number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_currency(
number, currency, format=format, locale=locale
)
def format_percent(number, format=None):
"""Return formatted percent value for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted percent number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_percent(number, format=format, locale=locale)
def format_scientific(number, format=None):
"""Return value formatted in scientific notation for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted percent number
:rtype: unicode
"""
locale = get_locale()
return numbers.format_scientific(number, format=format, locale=locale)
class Domain(object):
"""Localization domain. By default will use look for tranlations in Flask application directory
and "messages" domain - all message catalogs should be called ``messages.mo``.
"""
def __init__(self, dirname=None, domain='messages'):
self.dirname = dirname
self.domain = domain
self.cache = dict()
def as_default(self):
"""Set this domain as default for the current request"""
ctx = _request_ctx_stack.top
if ctx is None:
raise RuntimeError("No request context")
ctx.babel_domain = self
def get_translations_cache(self, ctx):
"""Returns dictionary-like object for translation caching"""
return self.cache
def get_translations_path(self, ctx):
"""Returns translations directory path. Override if you want
to implement custom behavior.
"""
return self.dirname or os.path.join(ctx.app.root_path, 'translations')
def get_translations(self):
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return NullTranslations()
locale = get_locale()
cache = self.get_translations_cache(ctx)
translations = cache.get(str(locale))
if translations is None:
dirname = self.get_translations_path(ctx)
translations = support.Translations.load(dirname,
locale,
domain=self.domain)
cache[str(locale)] = translations
return translations
def gettext(self, string, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
::
gettext(u'Hello World!')
gettext(u'Hello %(name)s!', name='World')
"""
t = self.get_translations()
return t.ugettext(string) % variables
def ngettext(self, singular, plural, num, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
The `num` parameter is used to dispatch between singular and various
plural forms of the message. It is available in the format string
as ``%(num)d`` or ``%(num)s``. The source language should be
English or a similar language which only has one plural form.
::
ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
"""
variables.setdefault('num', num)
t = self.get_translations()
return t.ungettext(singular, plural, num) % variables
def pgettext(self, context, string, **variables):
"""Like :func:`gettext` but with a context.
.. versionadded:: 0.7
"""
t = self.get_translations()
return t.upgettext(context, string) % variables
def npgettext(self, context, singular, plural, num, **variables):
"""Like :func:`ngettext` but with a context.
.. versionadded:: 0.7
"""
variables.setdefault('num', num)
t = self.get_translations()
return t.unpgettext(context, singular, plural, num) % variables
def lazy_gettext(self, string, **variables):
"""Like :func:`gettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
hello = lazy_gettext(u'Hello World')
@app.route('/')
def index():
return unicode(hello)
"""
from speaklater import make_lazy_string
return make_lazy_string(self.gettext, string, **variables)
def lazy_pgettext(self, context, string, **variables):
"""Like :func:`pgettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
.. versionadded:: 0.7
"""
from speaklater import make_lazy_string
return make_lazy_string(self.pgettext, context, string, **variables)
# This is the domain that will be used if there is no request context (and thus no app)
# or if the app isn't initialized for babel. Note that if there is no request context,
# then the standard Domain will use NullTranslations
domain = Domain()
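# Illustrative, commented-out sketch: an extension can ship its own catalogs by
# creating a separate Domain (the path and domain name below are hypothetical)
# and either calling its gettext methods directly or installing it per request:
#
#     mydomain = Domain(dirname='/path/to/extension/translations', domain='myext')
#     mydomain.gettext(u'Hello %(name)s!', name='World')
#     mydomain.as_default()   # module-level gettext() now uses this domain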
def get_domain():
"""Return the correct translation domain that is used for this request.
This will return the default domain (e.g. "messages" in <approot>/translations")
if none is set for this request.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return domain
try:
return ctx.babel_domain
except AttributeError:
pass
babel = ctx.app.extensions.get('babel')
if babel is not None:
d = babel._default_domain
else:
d = domain
ctx.babel_domain = d
return d
# Create shortcuts for the default Flask domain
def gettext(*args, **kwargs):
return get_domain().gettext(*args, **kwargs)
_ = gettext
def ngettext(*args, **kwargs):
return get_domain().ngettext(*args, **kwargs)
def pgettext(*args, **kwargs):
return get_domain().pgettext(*args, **kwargs)
def npgettext(*args, **kwargs):
return get_domain().npgettext(*args, **kwargs)
def lazy_gettext(*args, **kwargs):
return get_domain().lazy_gettext(*args, **kwargs)
def lazy_pgettext(*args, **kwargs):
return get_domain().lazy_pgettext(*args, **kwargs)
| initNirvana/Easyphotos | env/lib/python3.4/site-packages/flask_babelex/__init__.py | Python | mit | 22,503 |
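A minimal wiring sketch for the extension above, under stated assumptions: the Flask app, the route, the language list, and the 'Europe/Vienna' value are hypothetical; the selector callbacks simply return values in the form the registration decorators expect.

from datetime import datetime
from flask import Flask, request
from flask_babelex import Babel, gettext, format_datetime

app = Flask(__name__)
app.config['BABEL_DEFAULT_LOCALE'] = 'en'
app.config['BABEL_DEFAULT_TIMEZONE'] = 'UTC'
babel = Babel(app)

@babel.localeselector
def select_locale():
    # Best match against the languages this hypothetical app ships translations for.
    return request.accept_languages.best_match(['en', 'de'])

@babel.timezoneselector
def select_timezone():
    # Must return a timezone name as a string, e.g. 'Europe/Vienna', or None for the default.
    return 'Europe/Vienna'

@app.route('/')
def index():
    # Both helpers resolve the locale/timezone lazily, once per request.
    return gettext(u'Hello %(name)s!', name='World') + ' ' + format_datetime(datetime.utcnow())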
try:
from tornado.websocket import WebSocketHandler
    import tornado.ioloop
    import tornado.web
tornadoAvailable = True
except ImportError:
class WebSocketHandler(object): pass
tornadoAvailable = False
from json import loads as fromJS, dumps as toJS
from threading import Thread
from Log import console
import Settings
from utils import *
PORT = Settings.PORT + 1
handlers = []
channels = {}
class WebSocket:
@staticmethod
def available():
return tornadoAvailable
@staticmethod
def start():
if WebSocket.available():
WSThread().start()
@staticmethod
def broadcast(data):
for handler in handlers:
handler.write_message(toJS(data))
@staticmethod
def sendChannel(channel, data):
        if 'channel' not in data:
data['channel'] = channel
for handler in channels.get(channel, []):
handler.write_message(toJS(data))
class WSThread(Thread):
def __init__(self):
Thread.__init__(self)
self.name = 'websocket'
self.daemon = True
def run(self):
app = tornado.web.Application([('/', WSHandler)])
app.listen(PORT, '0.0.0.0')
tornado.ioloop.IOLoop.instance().start()
class WSHandler(WebSocketHandler):
def __init__(self, *args, **kw):
super(WSHandler, self).__init__(*args, **kw)
self.channels = set()
def check_origin(self, origin):
return True
def open(self):
handlers.append(self)
console('websocket', "Opened")
def on_message(self, message):
console('websocket', "Message received: %s" % message)
try:
data = fromJS(message)
        except ValueError:
return
if 'subscribe' in data and isinstance(data['subscribe'], list):
addChannels = (set(data['subscribe']) - self.channels)
self.channels |= addChannels
for channel in addChannels:
if channel not in channels:
channels[channel] = set()
channels[channel].add(self)
if 'unsubscribe' in data and isinstance(data['unsubscribe'], list):
rmChannels = (self.channels & set(data['unsubscribe']))
self.channels -= rmChannels
for channel in rmChannels:
channels[channel].remove(self)
if len(channels[channel]) == 0:
del channels[channel]
def on_close(self):
for channel in self.channels:
channels[channel].remove(self)
if len(channels[channel]) == 0:
del channels[channel]
handlers.remove(self)
console('websocket', "Closed")
verbs = {
'status': "Status set",
'name': "Renamed",
'goal': "Goal set",
'assigned': "Reassigned",
'hours': "Hours updated",
}
from Event import EventHandler, addEventHandler
class ShareTaskChanges(EventHandler):
def newTask(self, handler, task):
WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'new'}); #TODO
def taskUpdate(self, handler, task, field, value):
if field == 'assigned': # Convert set of Users to list of usernames
value = [user.username for user in value]
elif field == 'goal': # Convert Goal to goal ID
value = value.id if value else 0
description = ("%s by %s" % (verbs[field], task.creator)) if field in verbs else None
WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'update', 'id': task.id, 'revision': task.revision, 'field': field, 'value': value, 'description': description, 'creator': task.creator.username})
addEventHandler(ShareTaskChanges())
| mrozekma/Sprint | WebSocket.py | Python | mit | 3,192 |
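A short sketch of how this module is meant to be driven from the rest of the server; the channel name and payload below are invented for illustration, and WebSocket.start() assumes tornado is importable and Settings.PORT is configured.

from WebSocket import WebSocket

if WebSocket.available():      # False when the tornado import at the top failed
    WebSocket.start()          # runs the tornado IOLoop in a daemon thread on PORT

# Push an update to every handler subscribed to a channel (values hypothetical):
WebSocket.sendChannel("backlog#42", {'type': 'update', 'id': 7, 'field': 'status'})

# A client subscribes or unsubscribes by sending JSON frames such as:
#   {"subscribe": ["backlog#42"]}
#   {"unsubscribe": ["backlog#42"]}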
# __init__.py: Yet Another Bayes Net library
# Contact: Jacob Schreiber ( [email protected] )
"""
For detailed documentation and examples, see the README.
"""
# Make our dependencies explicit so compiled Cython code won't segfault trying
# to load them.
import networkx, matplotlib.pyplot, scipy
import numpy as np
import os
import pyximport
# Adapted from Cython docs https://github.com/cython/cython/wiki/
# InstallingOnWindows#mingw--numpy--pyximport-at-runtime
if os.name == 'nt':
if 'CPATH' in os.environ:
        os.environ['CPATH'] = os.environ['CPATH'] + os.pathsep + np.get_include()
else:
os.environ['CPATH'] = np.get_include()
# XXX: we're assuming that MinGW is installed in C:\MinGW (default)
if 'PATH' in os.environ:
        os.environ['PATH'] = os.environ['PATH'] + r';C:\MinGW\bin'
else:
        os.environ['PATH'] = r'C:\MinGW\bin'
mingw_setup_args = { 'options': { 'build_ext': { 'compiler': 'mingw32' } } }
pyximport.install(setup_args=mingw_setup_args)
elif os.name == 'posix':
if 'CFLAGS' in os.environ:
os.environ['CFLAGS'] = os.environ['CFLAGS'] + ' -I' + np.get_include()
else:
os.environ['CFLAGS'] = ' -I' + np.get_include()
pyximport.install()
from yabn import *
__version__ = '0.1.0'
| jmschrei/yabn | yabn/__init__.py | Python | mit | 1,279
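The environment-variable juggling above exists so that pyximport can find the NumPy headers when it compiles .pyx modules at import time. A roughly equivalent sketch, assuming the same NumPy dependency, passes the include directory through pyximport's setup_args instead of patching CPATH/CFLAGS:

import numpy as np
import pyximport

# Hand the NumPy include directory straight to the distutils build step; this
# should be equivalent for modules that `cimport numpy`.
pyximport.install(setup_args={'include_dirs': [np.get_include()]})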
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make_loaddata.py
Convert ken_all.csv to loaddata
"""
import argparse
import csv
def merge_separated_line(args):
"""
    Yield lines from the source CSV file.
    If two (or more) lines have the same postal code,
    merge them into a single line.
"""
def is_dup(line, buff):
""" lines is duplicated or not """
# same postalcode
if line[2] != buff[2]:
return False
# include choume and not
if line[11] != buff[11]:
return False
# line contains touten(kana)
if line[5].count(u'、') != 0:
return True
if buff[5].count(u'、') != 0:
return True
# line contains touten(kanji)
if line[8].count(u'、') != 0:
return True
if buff[8].count(u'、') != 0:
return True
return False
def merge(line, buff):
""" merge address of two lines """
new_buff = []
idx = 0
for element in line:
if element[:len(buff[idx])] != buff[idx]:
new_buff.append(u''.join([buff[idx], element]))
else:
new_buff.append(buff[idx])
idx += 1
return new_buff
line_buffer = []
ken_all = csv.reader(open(args.source))
for line in ken_all:
unicode_line = [unicode(s, 'utf8') for s in line]
if not(line_buffer):
line_buffer = unicode_line
continue
if is_dup(unicode_line, line_buffer):
line_buffer = merge(unicode_line, line_buffer)
else:
yield line_buffer
line_buffer = unicode_line
yield line_buffer
def parse_args():
    # parse command-line arguments
Parser = argparse.ArgumentParser(description='Make loaddata of postalcode.')
Parser.add_argument('source', help='input file of converting')
Parser.add_argument('area', help='data file for area-code')
Parser.add_argument('net', help='data file of net-code')
return Parser.parse_args()
def main(args):
    # main conversion: split merged rows into the area / net output files
Areadata = csv.writer(open(args.area, 'w'),
delimiter=',',
quoting=csv.QUOTE_NONE)
Netdata = csv.writer(open(args.net, 'w'),
delimiter=',',
quoting=csv.QUOTE_NONE)
for line in merge_separated_line(args):
zipcode = line[2]
if zipcode[5:7] != '00':
Areadata.writerow([s.encode('utf8') for s in line])
else:
Netdata.writerow([s.encode('utf8') for s in line])
if __name__ == '__main__':
args = parse_args()
main(args)
| morinatsu/ZipCode | bin/make_loaddata.py | Python | mit | 2,655 |
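A small sketch of driving the converter programmatically instead of via the command line; the file names are illustrative, the module is assumed to be importable as make_loaddata (Python 2, given the unicode() calls), and the input is expected to be Japan Post's KEN_ALL.CSV export.

import argparse
import make_loaddata

# Equivalent to: python make_loaddata.py KEN_ALL.CSV area.csv net.csv
args = argparse.Namespace(source='KEN_ALL.CSV', area='area.csv', net='net.csv')
make_loaddata.main(args)   # codes ending in '00' go to net.csv, the rest to area.csv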
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-23 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0003_auto_20171221_0336'),
]
operations = [
migrations.AlterField(
model_name='dailyproductivitylog',
name='source',
field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], max_length=50),
),
migrations.AlterField(
model_name='sleeplog',
name='source',
field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], max_length=50),
),
migrations.AlterField(
model_name='supplementlog',
name='source',
field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], default='web', max_length=50),
),
migrations.AlterField(
model_name='useractivitylog',
name='source',
field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], default='web', max_length=50),
),
migrations.AlterField(
model_name='usermoodlog',
name='source',
field=models.CharField(choices=[('api', 'Api'), ('ios', 'Ios'), ('android', 'Android'), ('mobile', 'Mobile'), ('web', 'Web'), ('user_excel', 'User_Excel'), ('text_message', 'Text_Message')], default='web', max_length=50),
),
]
| jeffshek/betterself | events/migrations/0004_auto_20171223_0859.py | Python | mit | 1,984 |
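Migrations like this are normally generated by makemigrations after a choices tuple changes on the models; the sketch below is one plausible, hypothetical model-side definition that would produce these choices, not the project's actual code.

from django.db import models

SOURCE_CHOICES = (
    ('api', 'Api'),
    ('ios', 'Ios'),
    ('android', 'Android'),
    ('mobile', 'Mobile'),
    ('web', 'Web'),
    ('user_excel', 'User_Excel'),
    ('text_message', 'Text_Message'),
)

class SupplementLog(models.Model):
    # default='web' mirrors the AlterField for supplementlog.source above
    source = models.CharField(max_length=50, choices=SOURCE_CHOICES, default='web')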
import aaf
import os
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
if not args:
parser.error("not enough argements")
path = args[0]
name, ext = os.path.splitext(path)
f = aaf.open(path, 'r')
f.save(name + ".xml")
f.close()
| markreidvfx/pyaaf | example/aaf2xml.py | Python | mit | 281 |
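An inline sketch of what the script above does, using only the calls it already makes; the file names are illustrative.

import aaf

# Equivalent to: python aaf2xml.py project.aaf
f = aaf.open('project.aaf', 'r')
f.save('project.xml')   # XML is written next to the input, extension swapped
f.close()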
"""
telemetry full tests.
"""
import platform
import sys
from unittest import mock
import pytest
import wandb
def test_telemetry_finish(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
run = wandb.init()
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 2 in telemetry.get("3", [])
def test_telemetry_imports_hf(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
run = wandb.init()
with mock.patch.dict("sys.modules", {"transformers": mock.Mock()}):
import transformers
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# hf in finish modules but not in init modules
assert telemetry and 11 not in telemetry.get("1", [])
assert telemetry and 11 in telemetry.get("2", [])
def test_telemetry_imports_catboost(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
with mock.patch.dict("sys.modules", {"catboost": mock.Mock()}):
import catboost
run = wandb.init()
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# catboost in both init and finish modules
assert telemetry and 7 in telemetry.get("1", [])
assert telemetry and 7 in telemetry.get("2", [])
@pytest.mark.skipif(
platform.system() == "Windows", reason="test suite does not build jaxlib on windows"
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="jax has no py3.10 wheel")
def test_telemetry_imports_jax(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
import jax
wandb.init()
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# jax in finish modules but not in init modules
assert telemetry and 12 in telemetry.get("1", [])
assert telemetry and 12 in telemetry.get("2", [])
def test_telemetry_run_organizing_init(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
wandb.init(name="test_name", tags=["my-tag"], config={"abc": 123}, id="mynewid")
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 13 in telemetry.get("3", []) # name
assert telemetry and 14 in telemetry.get("3", []) # id
assert telemetry and 15 in telemetry.get("3", []) # tags
assert telemetry and 16 in telemetry.get("3", []) # config
def test_telemetry_run_organizing_set(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
run = wandb.init()
run.name = "test-name"
run.tags = ["tag1"]
wandb.config.update = True
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 17 in telemetry.get("3", []) # name
assert telemetry and 18 in telemetry.get("3", []) # tags
assert telemetry and 19 in telemetry.get("3", []) # config update
| wandb/client | tests/test_telemetry_full.py | Python | mit | 3,333 |
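These tests lean on the repository's fixtures (runner, live_mock_server, parse_ctx); a selective run from the repo root might look like the comment below, with the path taken from the metadata line above and the -k expression chosen for illustration.

# pytest tests/test_telemetry_full.py -k "telemetry_imports" -x
# Each test reads the recorded context back with parse_ctx(live_mock_server.get_ctx())
# and asserts on the numeric IDs stored under telemetry["1"] (imports at init),
# ["2"] (imports at finish), and ["3"] (feature flags).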
# -*- coding: utf-8 -*-
' Check whether an uploaded file extension is allowed '
__author__ = 'Ellery'
from app import app
import datetime, random
from PIL import Image
import os
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config.get('ALLOWED_EXTENSIONS')
def unique_name():
now_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
random_num = random.randint(0, 100)
if random_num <= 10:
random_num = str(0) + str(random_num)
unique_num = str(now_time) + str(random_num)
return unique_num
def image_thumbnail(filename):
filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
im = Image.open(filepath)
w, h = im.size
if w > h:
im.thumbnail((106, 106*h/w))
else:
im.thumbnail((106*w/h, 106))
im.save(os.path.join(app.config.get('UPLOAD_FOLDER'),
os.path.splitext(filename)[0] + '_thumbnail' + os.path.splitext(filename)[1]))
def image_delete(filename):
thumbnail_filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
filepath = thumbnail_filepath.replace('_thumbnail', '')
os.remove(filepath)
os.remove(thumbnail_filepath)
def cut_image(filename, box):
filepath = os.path.join(app.config.get('UPLOAD_AVATAR_FOLDER'), filename)
im = Image.open(filepath)
new_im = im.crop(box)
    new_im.save(os.path.join(app.config.get('UPLOAD_AVATAR_FOLDER'), filename))
| allotory/basilinna | app/main/upload_file.py | Python | mit | 1,433
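A sketch of how these helpers are typically combined in an upload view; the route, the 'photo' form field, and the import path (taken from the metadata line above) are assumptions, not part of the module itself.

import os
from flask import request, abort
from app import app
from app.main.upload_file import allowed_file, unique_name, image_thumbnail

@app.route('/upload', methods=['POST'])
def upload():
    f = request.files.get('photo')            # 'photo' is a hypothetical form field name
    if f is None or not allowed_file(f.filename):
        abort(400)
    ext = os.path.splitext(f.filename)[1]
    filename = unique_name() + ext            # timestamp plus two-digit random suffix
    f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    image_thumbnail(filename)                 # writes <name>_thumbnail<ext> alongside it
    return filename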