repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
stringlengths 6-61 | stringlengths 4-230 | stringlengths 1-3 | stringlengths 4-6 | stringlengths 1.01k-850k | stringclasses 15 values | int64 -9,220,477,234,079,998,000 to 9,219,060,020B | float64 11.6-96.6 | int64 32-939 | float64 0.26-0.9 | bool 1 class | float64 1.62-6.1 | bool 2 classes | bool 2 classes | bool 1 class
oszi/collection-json.python | collection_json.py | 1 | 16792 | """Classes for representing a Collection+JSON document."""
from __future__ import absolute_import, unicode_literals
import json
__version__ = '0.1.1'
class ArrayProperty(object):
"""A descriptor that converts from any enumerable to a typed Array."""
def __init__(self, cls, name):
"""Constructs typed array property
:param cls type: the type of objects expected in the array
:param name str: name of the property
"""
self.cls = cls
self.name = name
def __get__(self, instance, owner):
target = instance
if target is None:
target = owner
if self.name in target.__dict__:
return target.__dict__[self.name]
raise AttributeError
def __set__(self, instance, value):
if value is None:
value = []
instance.__dict__[self.name] = Array(self.cls, self.name, value)
class DictProperty(object):
"""A descriptor that converts to a dictionary containing Arrays or objects of a given type"""
def __init__(self, cls, name):
"""Constructs the dictionary
:param cls type: the expected type of the objects
"""
self.cls = cls
self.name = name
def __get__(self, instance, owner):
target = instance
if target is None:
target = owner
if self.name in target.__dict__:
return target.__dict__[self.name]
raise AttributeError
def __set__(self, instance, vals):
instance.__dict__[self.name] = {}
if vals is not None:
for name, value in vals.items():
if value is None or isinstance(value, self.cls):
instance.__dict__[self.name][name] = value
elif isinstance(value, dict):
instance.__dict__[self.name][name] = self.cls(**value)
elif isinstance(value, list):
instance.__dict__[self.name][name] = Array(self.cls, None, value)
else:
raise TypeError("Invalid value '%s', "
"expected dict, list or '%s'" % (value,
self.cls.__name__))
class TypedProperty(object):
"""A descriptor for assigning only a specific type of instance.
Additionally supports assigning a dictionary convertible to the type.
"""
def __init__(self, cls, name):
"""Constructs the typed property
:param cls type: the type of object expected
"""
self.cls = cls
self.name = name
def __get__(self, instance, owner):
target = instance
if target is None:
target = owner
if self.name in target.__dict__:
return target.__dict__[self.name]
raise AttributeError
def __set__(self, instance, value):
if value is None or isinstance(value, self.cls):
instance.__dict__[self.name] = value
elif isinstance(value, dict):
instance.__dict__[self.name] = self.cls(**value)
else:
raise TypeError("Invalid value '%s', "
"expected dict or '%s'" % (value,
self.cls.__name__))
class ComparableObject(object):
"""Abstract base class for objects implementing equality comparison.
This class provides default __eq__ and __ne__ implementations.
"""
def __eq__(self, other):
"""Return True if both instances are equivalent."""
return (type(self) == type(other) and
self.__dict__ == other.__dict__)
def __ne__(self, other):
"""Return True if both instances are not equivalent."""
return (type(self) != type(other) or
self.__dict__ != other.__dict__)
class Data(ComparableObject):
"""Object representing a Collection+JSON data object."""
def __init__(self, name, value=None, prompt=None, array=None, object=None):
self.name = name
self.value = value
self.array = array
self.object = object
self.prompt = prompt
property_count = 0
if value is not None: property_count = property_count+1
if array is not None: property_count = property_count+1
if object is not None: property_count = property_count+1
if property_count > 1:
raise ValueError('Data can only have one of the three properties.')
def __repr__(self):
data = "name='%s'" % self.name
if self.prompt is not None:
data += " prompt='%s'" % self.prompt
return "<Data: %s>" % data
def to_dict(self):
"""Return a dictionary representing a Data object."""
output = {
'name': self.name
}
if self.value is not None:
output['value'] = self.value
elif self.array is not None:
output['array'] = self.array
elif self.object is not None:
output['object'] = self.object
if self.prompt is not None:
output['prompt'] = self.prompt
return output
class Link(ComparableObject):
"""Object representing a Collection+JSON link object."""
def __init__(self, href, rel, name=None, render=None, prompt=None,
length=None, inline=None):
self.href = href
self.rel = rel
self.name = name
self.render = render
self.prompt = prompt
self.length = length
self.inline = inline
def __repr__(self):
data = "rel='%s'" % self.rel
if self.name:
data += " name='%s'" % self.name
if self.render:
data += " render='%s'" % self.render
if self.prompt:
data += " prompt='%s'" % self.prompt
if self.length:
data += " length='%s'" % self.length
if self.inline:
data += " inline='%s'" % self.inline
return "<Link: %s>" % data
def to_dict(self):
"""Return a dictionary representing a Link object."""
output = {
'href': self.href,
'rel': self.rel,
}
if self.name is not None:
output['name'] = self.name
if self.render is not None:
output['render'] = self.render
if self.prompt is not None:
output['prompt'] = self.prompt
if self.length is not None:
output['length'] = self.length
if self.inline is not None:
output['inline'] = self.inline
return output
class Error(ComparableObject):
"""Object representing a Collection+JSON error object."""
def __init__(self, code=None, message=None, title=None):
self.code = code
self.message = message
self.title = title
def __repr__(self):
data = ''
if self.code is not None:
data += " code='%s'" % self.code
if self.message is not None:
data += " message='%s'" % self.message
if self.title is not None:
data += " title='%s'" % self.title
return "<Error%s>" % data
def to_dict(self):
"""Return a dictionary representing the Error instance."""
output = {}
if self.code:
output['code'] = self.code
if self.message:
output['message'] = self.message
if self.title:
output['title'] = self.title
return output
class Template(ComparableObject):
"""Object representing a Collection+JSON template object."""
data = ArrayProperty(Data, "data")
@staticmethod
def from_json(data):
"""Return a template instance.
Convenience method for parsing 'write' responses,
which should only contain a template object.
This method parses a json string into a Template object.
Raises `ValueError` when no valid document is provided.
"""
try:
data = json.loads(data)
kwargs = data.get('template')
if not kwargs:
raise ValueError
except ValueError:
raise ValueError('Not valid Collection+JSON template data.')
template = Template(**kwargs)
return template
def __init__(self, data=None):
self.data = data
def __repr__(self):
data = [str(item.name) for item in self.data]
return "<Template: data=%s>" % data
def __getattr__(self, name):
return getattr(self.data, name)
@property
def properties(self):
"""Return a list of names that can be looked up on the template."""
return [item.name for item in self.data]
def to_dict(self):
"""Return a dictionary representing a Template object."""
return {
'template': self.data.to_dict()
}
class Array(ComparableObject, list):
"""Object representing a Collection+JSON array."""
def __init__(self, item_class, collection_name, items):
self.item_class = item_class
self.collection_name = collection_name
super(Array, self).__init__(self._build_items(items))
def _build_items(self, items):
result = []
for item in items:
if isinstance(item, self.item_class):
result.append(item)
elif isinstance(item, dict):
result.append(self.item_class(**item))
else:
raise ValueError("Invalid value for %s: %r" % (
self.item_class.__name__, item))
return result
def __eq__(self, other):
"""Return True if both instances are equivalent."""
return (super(Array, self).__eq__(other) and
list.__eq__(self, other))
def __ne__(self, other):
"""Return True if both instances are not equivalent."""
return (super(Array, self).__ne__(other) or
list.__ne__(self, other))
def __getattr__(self, name):
results = self.find(name=name)
if not results:
raise AttributeError
elif len(results) == 1:
results = results[0]
return results
def _matches(self, name=None, rel=None):
for item in self:
item_name = getattr(item, 'name', None)
item_rel = getattr(item, 'rel', None)
if name is not None and item_name == name and rel is None:
# only searching by name
yield item
elif rel is not None and item_rel == rel and name is None:
# only searching by rel
yield item
elif item_name == name and item_rel == rel:
# searching by name and rel
yield item
def find(self, name=None, rel=None):
"""Return a list of items in the array matching name and/or rel.
If both name and rel parameters are provided, returned items must match
both properties.
"""
return list(self._matches(name=name, rel=rel))
def get(self, name=None, rel=None):
"""Return the first item in the array matching name and/or rel.
If both name and rel parameters are provided, the returned item must
match both properties.
If no item is found, raises ValueError.
"""
try:
return next(self._matches(name=name, rel=rel))
except StopIteration:
raise ValueError('No matching item found.')
def to_dict(self):
"""Return a dictionary representing an Array object."""
if self.item_class is Collection:
data = {
item.href: item.to_dict() for item in self
}
else:
data = [
item.to_dict() for item in self
]
if self.collection_name is not None:
return {
self.collection_name: data
}
return data
class Item(ComparableObject):
"""Object representing a Collection+JSON item object."""
data = ArrayProperty(Data, "data")
links = ArrayProperty(Link, "links")
def __init__(self, href=None, data=None, links=None):
self.href = href
self.data = data
self.links = links
def __repr__(self):
return "<Item: href='%s'>" % self.href
def __getattr__(self, name):
return getattr(self.data, name)
@property
def properties(self):
"""Return a list of names that can be looked up on the item."""
return [item.name for item in self.data]
def to_dict(self):
"""Return a dictionary representing an Item object."""
output = {}
if self.href:
output['href'] = self.href
if self.data:
output.update(self.data.to_dict())
if self.links:
output.update(self.links.to_dict())
return output
class Query(ComparableObject):
"""Object representing a Collection+JSON query object."""
data = ArrayProperty(Data, "data")
def __init__(self, href, rel, name=None, prompt=None, data=None):
self.href = href
self.rel = rel
self.name = name
self.prompt = prompt
self.data = data
def __repr__(self):
data = "rel='%s'" % self.rel
if self.name:
data += " name='%s'" % self.name
if self.prompt:
data += " prompt='%s'" % self.prompt
return "<Query: %s>" % data
def to_dict(self):
"""Return a dictionary representing a Query object."""
output = {
'href': self.href,
'rel': self.rel,
}
if self.name is not None:
output['name'] = self.name
if self.prompt is not None:
output['prompt'] = self.prompt
if len(self.data):
output.update(self.data.to_dict())
return output
class Collection(ComparableObject):
"""Object representing a Collection+JSON document."""
@staticmethod
def from_json(data):
"""Return a Collection instance.
This method parses a json string into a Collection object.
Raises `ValueError` when no valid document is provided.
"""
try:
data = json.loads(data)
kwargs = data.get('collection')
if not kwargs:
raise ValueError
if 'inline' in kwargs and kwargs['inline']:
kwargs['inline'] = [Collection(**data.get('collection'))
for data in kwargs['inline'].values()]
except ValueError:
raise ValueError('Not a valid Collection+JSON document.')
collection = Collection(**kwargs)
return collection
def __new__(cls, *args, **kwargs):
cls.error = TypedProperty(Error, 'error')
cls.errors = DictProperty(Error, 'errors')
cls.template = TypedProperty(Template, 'template')
cls.items = ArrayProperty(Item, 'items')
cls.links = ArrayProperty(Link, 'links')
cls.inline = ArrayProperty(Collection, 'inline')
cls.queries = ArrayProperty(Query, 'queries')
return super(Collection, cls).__new__(cls)
def __init__(self, href, links=None, items=None, inline=None, queries=None,
template=None, error=None, errors=None, version='1.0'):
self.version = version
self.href = href
self.error = error
self.errors = errors
self.template = template
self.items = items
self.links = links
self.inline = inline
self.queries = queries
def __repr__(self):
return "<Collection: version='%s' href='%s'>" % (
self.version, self.href)
def __str__(self):
return json.dumps(self.to_dict())
def to_dict(self):
"""Return a dictionary representing a Collection object."""
output = {
'collection': {
'version': self.version,
'href': self.href,
}
}
if self.links:
output['collection'].update(self.links.to_dict())
if self.items:
output['collection'].update(self.items.to_dict())
if self.inline:
output['collection'].update(self.inline.to_dict())
if self.queries:
output['collection'].update(self.queries.to_dict())
if self.template:
output['collection'].update(self.template.to_dict())
if self.error:
output['collection'].update(self.error.to_dict())
if self.errors:
output['collection']['errors'] = {name : value.to_dict() for name, value in self.errors.items()}
return output
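
# Hedged usage sketch (not part of the original module): a minimal round trip
# through the classes above. The document content is illustrative only.
if __name__ == '__main__':
    doc = '{"collection": {"version": "1.0", "href": "http://example.org/friends/"}}'
    collection = Collection.from_json(doc)
    collection.links = [{'href': 'http://example.org/friends/rss', 'rel': 'feed'}]
    collection.items = [{'href': 'http://example.org/friends/jdoe',
                         'data': [{'name': 'full-name', 'value': 'J. Doe'}]}]
    print(collection)  # __str__ serialises the document back to a JSON string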
| bsd-2-clause | -7,806,763,957,741,782,000 | 30.038817 | 108 | 0.548535 | false | 4.442328 | false | false | false |
Flaburgan/diaspora-hub | thefederation/apps.py | 2 | 1049 | import datetime
import django_rq
from django.apps import AppConfig


class TheFederationConfig(AppConfig):
    name = "thefederation"
    verbose_name = "The Federation"

    def ready(self):
        from thefederation.social import make_daily_post
        from thefederation.tasks import aggregate_daily_stats
        from thefederation.tasks import poll_nodes

        scheduler = django_rq.get_scheduler('high')

        # Delete any existing jobs in the scheduler when the app starts up
        for job in scheduler.get_jobs():
            job.delete()

        scheduler.schedule(
            scheduled_time=datetime.datetime.utcnow(),
            func=aggregate_daily_stats,
            interval=5500,
        )

        scheduler.cron(
            '0 10 * * *',
            func=make_daily_post,
            queue_name='high',
        )

        scheduler = django_rq.get_scheduler('medium')
        scheduler.schedule(
            scheduled_time=datetime.datetime.utcnow(),
            func=poll_nodes,
            interval=10800,
        )
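
# Hedged configuration note (not part of the original file): for ready() to be
# called, the app and django_rq must be installed, and the 'high' and 'medium'
# queues it uses must be declared. Host/port values below are assumptions.
#
# INSTALLED_APPS = [..., 'django_rq', 'thefederation']
# RQ_QUEUES = {
#     'high': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
#     'medium': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
# }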
| agpl-3.0 | -5,405,463,045,591,289,000 | 26.605263 | 74 | 0.604385 | false | 4.407563 | false | false | false |
RobinQuetin/CAIRIS-web | cairis/cairis/PersonaCharacteristicPanel.py | 1 | 5070 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from BasePanel import BasePanel
from DimensionNameDialog import DimensionNameDialog
from DialogClassParameters import DialogClassParameters
from DocumentReferenceDialog import DocumentReferenceDialog
from ConceptReferenceDialog import ConceptReferenceDialog
from Borg import Borg
class PersonaCharacteristicPanel(BasePanel):
def __init__(self,parent):
BasePanel.__init__(self,parent,armid.PERSONACHARACTERISTIC_ID)
self.theId = None
b = Borg()
self.dbProxy = b.dbProxy
def buildControls(self,isCreate,inPersona):
mainSizer = wx.BoxSizer(wx.VERTICAL)
if (inPersona == False):
personas = self.dbProxy.getDimensionNames('persona')
mainSizer.Add(self.buildComboSizerList('Persona',(87,30),armid.PERSONACHARACTERISTIC_COMBOPERSONA_ID,personas),0,wx.EXPAND)
mainSizer.Add(self.buildRadioButtonSizer('Type',(87,30),[(armid.PERSONACHARACTERISTIC_RADIOREFERENCE_ID,'Reference'),(armid.PERSONACHARACTERISTIC_RADIOCONCEPT_ID,'Concept')]))
refs = ['[New reference]']
refs += self.dbProxy.getDimensionNames('document_reference')
mainSizer.Add(self.buildComboSizerList('Reference',(87,30),armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID,refs),0,wx.EXPAND)
if (inPersona == False):
bVars = self.dbProxy.getDimensionNames('behavioural_variable')
mainSizer.Add(self.buildComboSizerList('Behavioural Variable',(87,30),armid.PERSONACHARACTERISTIC_COMBOVARIABLE_ID,bVars),0,wx.EXPAND)
mainSizer.Add(self.buildMLTextSizer('Characteristic',(87,30),armid.PERSONACHARACTERISTIC_TEXTCHARACTERISTIC_ID),1,wx.EXPAND)
mainSizer.Add(self.buildCommitButtonSizer(armid.PERSONACHARACTERISTIC_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER)
wx.EVT_COMBOBOX(self,armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID,self.onReferenceChange)
wx.EVT_RADIOBUTTON(self,armid.PERSONACHARACTERISTIC_RADIOREFERENCE_ID,self.onReferenceSelected)
wx.EVT_RADIOBUTTON(self,armid.PERSONACHARACTERISTIC_RADIOCONCEPT_ID,self.onConceptSelected)
self.SetSizer(mainSizer)
def loadControls(self,objt,inPersona):
self.theId = objt.id()
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
charCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_TEXTCHARACTERISTIC_ID)
refCtrl.SetValue(objt.reference())
charCtrl.SetValue(objt.characteristic())
if (inPersona == False):
pCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOPERSONA_ID)
varCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOVARIABLE_ID)
pCtrl.SetValue(objt.persona())
varCtrl.SetValue(objt.behaviouralVariable())
def onReferenceChange(self,evt):
refValue = evt.GetString()
if (refValue == '[New reference]' or refValue == '[New concept]'):
if (refValue == '[New reference]'):
addParameters = DialogClassParameters(armid.DOCUMENTREFERENCE_ID,'Add Document Reference',DocumentReferenceDialog,armid.DOCUMENTREFERENCE_BUTTONCOMMIT_ID,self.dbProxy.addDocumentReference,True)
else:
addParameters = DialogClassParameters(armid.CONCEPTREFERENCE_ID,'Add Concept Reference',ConceptReferenceDialog,armid.CONCEPTREFERENCE_BUTTONCOMMIT_ID,self.dbProxy.addConceptReference,True)
dialogClass = addParameters.dclass()
addDialog = dialogClass(self,addParameters)
if (addDialog.ShowModal() == addParameters.createButtonId()):
dialogOutParameters = addDialog.parameters()
addFn = addParameters.setter()
objtId = addFn(dialogOutParameters)
dimName = dialogOutParameters.name()
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
refCtrl.Append(dimName)
refCtrl.SetValue(dimName)
addDialog.Destroy()
def onReferenceSelected(self,evt):
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
refs = ['[New reference]']
refs += self.dbProxy.getDimensionNames('document_reference')
refCtrl.SetItems(refs)
refCtrl.SetValue('')
def onConceptSelected(self,evt):
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
refs = ['[New concept]']
refs += self.dbProxy.getDimensionNames('concept_reference')
refCtrl.SetItems(refs)
refCtrl.SetValue('')
| apache-2.0 | 8,992,763,075,810,244,000 | 47.285714 | 201 | 0.763116 | false | 3.528184 | false | false | false |
AaronYee/ufldl_tutorial | cnn.py | 6 | 4479 | import numpy as np
import scipy.signal


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def cnn_convolve(patch_dim, num_features, images, W, b, zca_white, patch_mean):
    """
    Returns the convolution of the features given by W and b with
    the given images

    :param patch_dim: patch (feature) dimension
    :param num_features: number of features
    :param images: large images to convolve with, matrix in the form
                   images(r, c, channel, image number)
    :param W: weights of the sparse autoencoder
    :param b: bias of the sparse autoencoder
    :param zca_white: zca whitening
    :param patch_mean: mean of the images
    :return:
    """
    num_images = images.shape[3]
    image_dim = images.shape[0]
    image_channels = images.shape[2]

    # Instructions:
    # Convolve every feature with every large image here to produce the
    # numFeatures x numImages x (imageDim - patchDim + 1) x (imageDim - patchDim + 1)
    # matrix convolvedFeatures, such that
    # convolvedFeatures(featureNum, imageNum, imageRow, imageCol) is the
    # value of the convolved featureNum feature for the imageNum image over
    # the region (imageRow, imageCol) to (imageRow + patchDim - 1, imageCol + patchDim - 1)
    #
    # Expected running times:
    # Convolving with 100 images should take less than 3 minutes
    # Convolving with 5000 images should take around an hour
    # (So to save time when testing, you should convolve with less images, as
    # described earlier)

    convolved_features = np.zeros(shape=(num_features, num_images, image_dim - patch_dim + 1,
                                         image_dim - patch_dim + 1),
                                  dtype=np.float64)

    WT = W.dot(zca_white)
    bT = b - WT.dot(patch_mean)

    for i in range(num_images):
        for j in range(num_features):
            # convolution of image with feature matrix for each channel
            convolved_image = np.zeros(shape=(image_dim - patch_dim + 1, image_dim - patch_dim + 1),
                                       dtype=np.float64)

            for channel in range(image_channels):
                # Obtain the feature (patchDim x patchDim) needed during the convolution
                patch_size = patch_dim * patch_dim
                feature = WT[j, patch_size * channel:patch_size * (channel + 1)].reshape(patch_dim, patch_dim)

                # Flip the feature matrix because of the definition of convolution, as explained later
                feature = np.flipud(np.fliplr(feature))

                # Obtain the image
                im = images[:, :, channel, i]

                # Convolve "feature" with "im", adding the result to convolvedImage
                # be sure to do a 'valid' convolution
                convolved_image += scipy.signal.convolve2d(im, feature, mode='valid')

            # Subtract the bias unit (correcting for the mean subtraction as well)
            # Then, apply the sigmoid function to get the hidden activation
            convolved_image = sigmoid(convolved_image + bT[j])

            # The convolved feature is the sum of the convolved values for all channels
            convolved_features[j, i, :, :] = convolved_image

    return convolved_features


def cnn_pool(pool_dim, convolved_features):
    """
    Pools the given convolved features

    :param pool_dim: dimension of the pooling region
    :param convolved_features: convolved features to pool (as given by cnn_convolve)
                               convolved_features(feature_num, image_num, image_row, image_col)
    :return: pooled_features: matrix of pooled features in the form
                              pooledFeatures(featureNum, imageNum, poolRow, poolCol)
    """
    num_images = convolved_features.shape[1]
    num_features = convolved_features.shape[0]
    convolved_dim = convolved_features.shape[2]

    assert convolved_dim % pool_dim == 0, "Pooling dimension is not an exact multiple of convolved dimension"

    pool_size = convolved_dim / pool_dim
    pooled_features = np.zeros(shape=(num_features, num_images, pool_size, pool_size),
                               dtype=np.float64)

    for i in range(pool_size):
        for j in range(pool_size):
            pool = convolved_features[:, :, i * pool_dim:(i + 1) * pool_dim, j * pool_dim:(j + 1) * pool_dim]
            pooled_features[:, :, i, j] = np.mean(np.mean(pool, 2), 2)

    return pooled_features
| mit | 1,652,702,458,388,864,500 | 41.264151 | 110 | 0.620674 | false | 3.818414 | false | false | false
DataONEorg/d1_python | test_utilities/src/d1_test/mock_api/ping.py | 1 | 2010 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock a ping response.
CNCore.ping() → null
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.ping
MNRead.ping() → null
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/MN_APIs.html#MNCore.ping
A DataONEException can be triggered by adding a custom header. See
d1_exception.py
"""
import logging
import re
import responses
import d1_common.const
import d1_common.url
import d1_test.mock_api.d1_exception
PING_ENDPOINT_RX = r"v([123])/monitor/ping"
def add_callback(base_url):
    responses.add_callback(
        responses.GET,
        re.compile(r"^" + d1_common.url.joinPathElements(base_url, PING_ENDPOINT_RX)),
        callback=_request_callback,
        content_type="",
    )


def _request_callback(request):
    logging.debug('Received callback. url="{}"'.format(request.url))
    # Return DataONEException if triggered
    exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
    if exc_response_tup:
        return exc_response_tup
    # Return regular response
    body_str = "OK"
    header_dict = {"Content-Type": d1_common.const.CONTENT_TYPE_OCTET_STREAM}
    return 200, header_dict, body_str
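
# Hedged usage sketch (not part of the original module). It assumes the public
# behaviour of the `responses` and `requests` packages: with the mock callback
# registered, a GET against the ping endpoint returns the "OK" body above.
if __name__ == "__main__":
    import requests

    @responses.activate
    def _demo_ping(base_url="http://example.com/mn"):
        add_callback(base_url)
        resp = requests.get(base_url + "/v2/monitor/ping")
        print(resp.status_code, resp.text)  # expected: 200 OK

    _demo_ping()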
| apache-2.0 | -175,475,432,308,310,720 | 30.84127 | 90 | 0.732802 | false | 3.377104 | false | false | false |
NPWR/Year-2042 | genericFunctions.py | 1 | 2725 | import pygame as pg
from pygame.locals import *
from constants import *
import sys
AROUND = [[0,0],
[1,0],
[1,1],
[0,1],
[-1,1],
[-1,0],
[-1,-1],
[0,-1],
[1,-1]]
def MOVE(cell,vec):
return [cell[0]+vec[0],cell[1]+vec[1]]
KEY_ON = {
"UP":False,
"DOWN":False,
"LEFT":False,
"RIGHT":False,
"SPACE":False,
"LCLICK":False,
"RCLICK":False}
def onScreen(pos):
ret = True
if pos[0] < 0:
ret = False
elif pos[0] >= W:
ret = False
if pos[1] < 0:
ret = False
elif pos[1] >= H:
ret = False
return ret
def verifyColor(color):
r = color[0]
g = color[1]
b = color[2]
if r < 0:
r = 0
elif r > 255:
r = 255
if g < 0:
g = 0
elif g > 255:
g = 255
if b < 0:
b = 0
elif b > 255:
b = 255
return (r,g,b)
def handleEvent(WORLD,event,M_MASK):
mb = pg.mouse.get_pressed()
N_MASK = 0
if mb[0]:
N_MASK += M_L
if mb[1]:
N_MASK += M_M
if mb[2]:
N_MASK += M_R
D_MASK = -(N_MASK - M_MASK)
M_MASK = N_MASK
if event.type == QUIT:
pg.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_F4:
pg.quit()
sys.exit()
if event.key == K_DOWN or event.key == K_s:
KEY_ON["DOWN"] = True
if event.key == K_UP or event.key == K_w:
KEY_ON["UP"] = True
if event.key == K_LEFT or event.key == K_a:
KEY_ON["LEFT"] = True
if event.key == K_RIGHT or event.key == K_d:
KEY_ON["RIGHT"] = True
if event.key == K_SPACE:
KEY_ON["SPACE"] = True
if event.type == MOUSEBUTTONDOWN:
if pg.mouse.get_pressed()[0]:
KEY_ON["LCLICK"] = True
WORLD.signal('LCLICK')
if pg.mouse.get_pressed()[2]:
KEY_ON["RCLICK"] = True
WORLD.signal('RCLICK')
else:
if D_MASK & M_R:
KEY_ON["RCLICK"] = False
if D_MASK & M_L:
KEY_ON["LCLICK"] = False
if event.type == KEYUP:
if event.key == K_DOWN or event.key == K_s:
KEY_ON["DOWN"] = False
if event.key == K_UP or event.key == K_w:
KEY_ON["UP"] = False
if event.key == K_LEFT or event.key == K_a:
KEY_ON["LEFT"] = False
if event.key == K_RIGHT or event.key == K_d:
KEY_ON["RIGHT"] = False
if event.key == K_SPACE:
KEY_ON["SPACE"] = False
return M_MASK
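
# Hedged usage sketch (not part of the original module): a minimal event loop
# built around handleEvent(). It assumes the game's `constants` module defines
# W, H, M_L, M_M and M_R, and that a WORLD object with a signal() method exists,
# as implied by the code above; both are assumptions, so the sketch is left
# commented out.
#
# WORLD, M_MASK = World(), 0
# screen = pg.display.set_mode((W, H))
# while True:
#     for event in pg.event.get():
#         M_MASK = handleEvent(WORLD, event, M_MASK)
#     pg.display.flip()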
| gpl-2.0 | 6,854,233,328,337,062,000 | 19.801527 | 52 | 0.430826 | false | 3.164925 | false | false | false |
polltooh/TensorflowToolbox | utility/file_io.py | 1 | 2817 | import os
import random
import numpy as np
import importlib


def get_listfile(image_dir, extension=".jpg"):
    if not image_dir.endswith("/"):
        image_dir = image_dir + "/"
    image_list = os.listdir(image_dir)
    image_list = [image_dir + image for image in image_list if image.endswith(extension)]
    return image_list


def get_dir_list(frame_dir):
    if not frame_dir.endswith("/"):
        frame_dir = frame_dir + "/"
    dir_list = os.listdir(frame_dir)
    dir_list = [frame_dir +
                image_dir for image_dir in dir_list if os.path.isdir(frame_dir + image_dir)]
    return dir_list


def delete_last_empty_line(s):
    end_index = len(s) - 1
    while(end_index >= 0 and (s[end_index] == "\n" or s[end_index] == "\r")):
        end_index -= 1
    s = s[:end_index + 1]
    return s


def read_file(file_name):
    with open(file_name, "r") as f:
        s = f.read()
        s = delete_last_empty_line(s)
        s_l = s.split("\n")
        for i, l in enumerate(s_l):
            if l.endswith("\r"):
                s_l[i] = s_l[i][:-1]
    return s_l


def save_file(string_list, file_name, shuffle_data=False):
    if (shuffle_data):
        random.shuffle(string_list)
    with open(file_name, "w") as f:
        if not len(string_list):
            f.write("")
        else:
            file_string = '\n'.join(string_list)
            if (file_string[-1] != "\n"):
                file_string += "\n"
            f.write(file_string)


def get_file_length(file_name):
    with open(file_name, 'r') as f:
        s = f.read()
    s_l = s.split("\n")
    total_len = len(s_l)
    return total_len


def save_numpy_array(numpy_array, file_name):
    numpy_array.tofile(file_name)


def remove_extension(file_name):
    index = file_name.rfind(".")
    if (index == -1):
        return file_name
    else:
        return file_name[0:index]


def import_module_class(module_name, class_name=None):
    module = importlib.import_module(module_name)
    if class_name == None:
        return module
    else:
        return getattr(module, class_name)


def check_exist(file_name):
    """
    Args:
        file_name: file name of the file list
            i.e.: train_list.txt
    """
    file_list = read_file(file_name)
    for i, f in enumerate(file_list):
        f_l = f.split(" ")
        for ff in f_l:
            is_exist = os.path.exists(ff)
            if not is_exist:
                raise OSError("In %s, row: %d, "
                              "%s does not exist" % (file_name, i, ff))


def save_string(input_string, file_name):
    if os.path.exists(file_name):
        mode = "a"
    else:
        mode = "w"
    if not input_string.endswith("\n"):
        input_string += "\n"
    with open(file_name, mode) as f:
        f.write(input_string)
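
# Hedged usage sketch (not part of the original module): round-trip a small
# file list through save_file()/read_file().
if __name__ == "__main__":
    save_file(["a.jpg 0", "b.jpg 1"], "/tmp/train_list.txt", shuffle_data=True)
    print(read_file("/tmp/train_list.txt"))
    print(get_file_length("/tmp/train_list.txt"))
    # check_exist("/tmp/train_list.txt") would raise OSError here because
    # a.jpg and b.jpg are not real paths.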
| apache-2.0 | 2,384,068,973,960,488,400 | 24.151786 | 92 | 0.546326 | false | 3.226804 | false | false | false |
timbueno/longboxed | longboxed/manage/users.py | 1 | 5632 | # -*- coding: utf-8 -*-
"""
longboxed.manage.users
~~~~~~~~~~~~~~~~~~~~~
user management commands
"""
from flask import current_app
from flask.ext.script import Command, Option, prompt, prompt_pass
from flask.ext.security.forms import RegisterForm
from flask.ext.security.registerable import register_user
from werkzeug.datastructures import MultiDict
from werkzeug.local import LocalProxy
from ..core import db
from ..models import User, Role, Publisher
class RemovePublisherTitleFromPullLists(Command):
"""
Removes all instances of titles by a certain publisher from all users pull
lists
"""
def get_options(self):
return [
Option('-p', '--publisher', dest='publisher', required=True),
]
def run(self, publisher=None):
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '!! Starting: Removing all \'%s\' titles from users pull lists' % publisher
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
if Publisher.query.filter_by(name=publisher).first():
pagination = User.query.paginate(1, per_page=20, error_out=False)
has_next = True
while has_next:
for user in pagination.items:
save_user = False
for title in user.pull_list:
if title.publisher.name == publisher:
print 'Removing %s from %s\'s pull list...' % (title.name, user.email)
save_user = True
user.pull_list.remove(title)
if save_user:
user.save()
if pagination.page:
percent_complete = (pagination.page/float(pagination.pages)) * 100.0
print '%.2f%% complete...' % percent_complete
if pagination.has_next:
pagination = pagination.next(error_out=False)
else:
has_next = False
else:
print 'Publisher \'%s\' not found' % publisher
class CreateNewRoleCommand(Command):
"""Creates a role"""
def run(self):
name = prompt('Role Name')
description = prompt('Role Description')
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
_security_datastore.create_role(name=name, description=description)
db.session.commit()
return
class CreateDefaultRolesCommand(Command):
"""Creates inital roles (user, admin, super)"""
def run(self):
default_roles = [('user', 'No Permissions'), ('admin', 'Comic specific permissions'), ('super', 'All permissions')]
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
for role in default_roles:
_security_datastore.find_or_create_role(name=role[0], description=role[1])
db.session.commit()
print 'Successfully added roles'
class CreateUserCommand(Command):
"""Create a user"""
def run(self):
email = prompt('Email')
password = prompt_pass('Password')
password_confirm = prompt_pass('Confirm Password')
data = MultiDict(dict(email=email, password=password, password_confirm=password_confirm))
form = RegisterForm(data, csrf_enabled=False)
if form.validate():
user = register_user(email=email, password=password)
print '\nUser created successfully'
print 'User(id=%s email=%s' % (user.id, user.email)
return
print '\nError creating user:'
for errors in form.errors.values():
print '\n'.join(errors)
class AddSuperUserRoleCommand(Command):
"""Gives the given user SuperUser role"""
def run(self):
email = prompt('Email')
# user = users.first(email=email)
user = User.query.filter_by(email=email).first()
if user:
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
admin_role = _security_datastore.find_role('admin')
super_role = _security_datastore.find_role('super')
_security_datastore.add_role_to_user(user, super_role)
_security_datastore.add_role_to_user(user, admin_role)
db.session.commit()
print '\nUser given super role successfully'
return
print '\nNo user found'
class AddAdminUserRoleCommand(Command):
"""Gives the given user admin role"""
def run(self):
email = prompt('Email')
# user = users.first(email=email)
user = User.query.filter_by(email=email).first()
if user:
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
admin_role = _security_datastore.find_role('admin')
_security_datastore.add_role_to_user(user, admin_role)
db.session.commit()
print '\nUser given admin role successfully'
return
print '\nNo user found'
class ListRolesCommand(Command):
"""List all roles"""
def run(self):
for r in Role.query.all():
print 'Role(name=%s description=%s)' % (r.name, r.description)
# for r in roles.all():
# print 'Role(name=%s description=%s)' % (r.name, r.description)
class ListUsersCommand(Command):
"""List all users"""
def run(self):
for u in User.query.all():
print 'User(id=%s email=%s)' % (u.id, u.email)
# for u in users.all():
# print 'User(id=%s email=%s)' % (u.id, u.email)
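
# Hedged wiring sketch (not part of the original module): how these commands
# might be registered with a Flask-Script manager. The `create_app` factory is
# an assumption, so the sketch is left commented out.
#
# from flask.ext.script import Manager
# manager = Manager(create_app)
# manager.add_command('create_user', CreateUserCommand())
# manager.add_command('create_role', CreateNewRoleCommand())
# manager.add_command('add_super_role', AddSuperUserRoleCommand())
# manager.add_command('list_users', ListUsersCommand())
# if __name__ == '__main__':
#     manager.run()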
| mit | 45,860,504,678,721,230 | 35.810458 | 123 | 0.585405 | false | 4.218727 | false | false | false |
plaidml/plaidml | plaidbench/plaidbench.py | 1 | 8191 | #!/usr/bin/env python
#
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import plaidbench.cli
SUPPORTED_NETWORKS = {
'keras': [
'densenet121',
'densenet169',
'densenet201',
'imdb_lstm',
'inception_resnet_v2',
'inception_v3',
'mobilenet',
'mobilenet_v2',
'nasnet_large',
'nasnet_mobile',
'resnet50',
'resnet50_v2',
'resnext50',
'vgg16',
'vgg19',
'xception',
],
'onnx': [
'bvlc_alexnet',
'densenet121',
'inception_v1',
'inception_v2',
'resnet50',
'shufflenet',
'squeezenet', # TODO: Fix inputs/outputs (only available as *.pb)
'vgg16',
'vgg19',
],
}
def make_parser():
# Create the parser outside of main() so the doc system can call this function
# and thereby generate a web page describing these options. See docs/index.rst.
parser = argparse.ArgumentParser()
plaidargs = parser.add_mutually_exclusive_group()
plaidargs.add_argument('--plaid', action='store_true', help="Use PlaidML as the backend.")
plaidargs.add_argument('--plaid-edsl',
action='store_true',
help="EXPERIMENTAL: Use PlaidML2 (EDSL) as the backend")
plaidargs.add_argument('--caffe2', action='store_true', help="Use Caffe2 as the backend.")
plaidargs.add_argument('--tf', action='store_true', help="Use TensorFlow as the backend.")
plaidargs.add_argument(
'--no-plaid',
action='store_true',
help="Use the non-PlaidML backend most appropriate to the chosen frontend")
frontendargs = parser.add_mutually_exclusive_group()
frontendargs.add_argument('--keras', action='store_true', help='Use Keras as the frontend')
frontendargs.add_argument('--onnx', action='store_true', help='Use ONNX as the frontend')
parser.add_argument('--fp16',
action='store_true',
help="Use half-precision floats, setting floatx='float16'.")
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help="Logging verbosity level (0..4).")
parser.add_argument('--results',
default='/tmp/plaidbench_results',
help="Destination directory for results output.")
parser.add_argument('--callgrind',
action='store_true',
help="Invoke callgrind during timing runs.")
parser.add_argument('--no-warmup', action='store_true', help="Skip the warmup runs.")
parser.add_argument('--no-kernel-timing', action='store_true', help="Skip detailed kernel timing.")
parser.add_argument('-n',
'--examples',
type=int,
default=None,
help="Number of examples to use.")
parser.add_argument('--epochs', type=int, default=1, help="Number of epochs per test.")
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--train',
action='store_true',
help="Measure training performance instead of inference.")
parser.add_argument('--blanket-run',
action='store_true',
help="Run all networks at a range of batch sizes, ignoring the "
"--batch-size and --examples options and the choice of network.")
parser.add_argument('--print-stacktraces',
action='store_true',
help="Print a stack trace if an exception occurs.")
parser.add_argument('--onnx-cpu',
action='store_true',
help='Use CPU instead of GPU (only used by ONNX)')
parser.add_argument('--refresh-onnx-data',
action='store_true',
help='Download ONNX data even if cached')
parser.add_argument('--tile', default=None, help='Export to this *.tile file')
parser.add_argument(
'--fix-learn-phase',
action='store_true',
help='Set the Keras learning_phase to an integer (rather than an input tensor)')
all_supported_networks = set()
for _, networks in SUPPORTED_NETWORKS.items():
all_supported_networks = all_supported_networks.union(networks)
parser.add_argument('module', choices=all_supported_networks, metavar='network')
return parser
def main():
exit_status = 0
parser = make_parser()
args = parser.parse_args()
argv = []
# plaidbench arguments
if args.verbose:
argv.append('-{}'.format('v' * args.verbose))
if args.results:
argv.append('--results={}'.format(args.results))
if args.callgrind:
argv.append('--callgrind')
if args.examples:
argv.append('--examples={}'.format(args.examples))
if args.epochs:
argv.append('--epochs={}'.format(args.epochs))
if args.batch_size:
argv.append('--batch-size={}'.format(args.batch_size))
if args.blanket_run:
argv.append('--blanket-run')
if args.no_warmup:
argv.append('--no-warmup')
if args.no_kernel_timing:
argv.append('--no-kernel-timing')
if args.print_stacktraces:
argv.append('--print-stacktraces')
if args.onnx:
# onnx arguments
argv.append('onnx')
if args.fp16:
raise NotImplementedError(
'With ONNX, --fp16 is defined by the model, not by the caller')
if args.train:
raise NotImplementedError('With ONNX, training vs. inference is model-specific')
if args.tile:
raise NotImplementedError(
'Can\'t currently save Tile code with PlaidBench ONNX backend.')
if args.onnx_cpu:
argv.append('--cpu')
if args.refresh_onnx_data:
argv.append('--no-use-cached-data')
if args.plaid_edsl:
argv.append('--plaid-edsl')
elif args.plaid or (not args.no_plaid and not args.caffe2 and not args.tf):
argv.append('--plaid')
elif args.caffe2:
argv.append('--caffe2')
else:
argv.append('--tensorflow')
else:
# keras arguments
argv.append('keras')
if args.tile:
argv.append('--tile={}'.format(args.tile))
if args.fp16:
argv.append('--fp16')
if args.train:
argv.append('--train')
if args.onnx_cpu:
raise NotImplementedError('--onnx_cpu is only meaningful with --onnx')
if args.refresh_onnx_data:
raise NotImplementedError('--refresh-onnx-data is only meaningful with --onnx')
if args.fix_learn_phase:
argv.append('--fix-learn-phase')
if args.plaid_edsl:
argv.append('--plaid-edsl')
os.environ["KERAS_BACKEND"] = "plaidml2.bridge.keras.__init__"
elif args.plaid or (not args.no_plaid and not args.caffe2 and not args.tf):
argv.append('--plaid')
elif args.caffe2:
raise ValueError('There is no Caffe2 backend for Keras')
else:
argv.append('--tensorflow')
if args.tile:
raise NotImplementedError('Can\'t save Tile code except in PlaidML')
# Networks
if args.module:
argv.append(args.module)
# Invoke plaidbench to do the actual benchmarking.
plaidbench.cli.plaidbench(args=argv)
if __name__ == '__main__':
main()
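
# Hedged usage examples (not part of the original script); flag spellings are
# taken from the argparse definitions above.
#
#   python plaidbench.py --plaid --batch-size 8 --examples 1024 mobilenet
#   python plaidbench.py --tf --train --epochs 2 resnet50
#   python plaidbench.py --onnx --onnx-cpu bvlc_alexnet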
| apache-2.0 | 4,325,232,983,265,446,400 | 37.636792 | 96 | 0.579661 | false | 4.038955 | false | false | false |
ControlSystemStudio/org.csstudio.iter | products/org.csstudio.iter.css.product/demo/m-TEST/src/main/boy/mimics/scripts/valve_control.py | 2 | 2794 | from epics import caget, caput, camonitor
from time import sleep

# Settings for the performance test
# open: OPEN THRESHOLD - 75%
# close: CLOSE THRESHOLD - 25%
open = caget('CTRL-SUP-BOY:VC101-COH.VAL') + 15
close = caget('CTRL-SUP-BOY:VC101-COL.VAL') - 15
start = caget('CTRL-SUP-BOY:PERF-SRT.VAL')
init = 40
firstValve = 101
lastValve = 350
tempo = 5
# scan: scan rate for the valve position
scan = caget('CTRL-SUP-BOY:PERF-SCAN.VAL')
maxLoop = int(caget('CTRL-SUP-BOY:PERF-LOOP.VAL'))


# define a callback function on 'pvname' and 'value'
def onChanges(pvname=None, value=None, **kw):
    scan = caget('CTRL-SUP-BOY:PERF-SCAN.VAL')
    print pvname, str(value), repr(kw)


camonitor('CTRL-SUP-BOY:PERF-SCAN.VAL', callback=onChanges)

# Valve status initialisation
for iValve in range(firstValve, lastValve+1):
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FB', init)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-CO', init)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-TRIP', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-INTLK', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FOMD', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-LOMD', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-MAMD', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-AUMD', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-IOERR', 0)
    caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-IOSIM', 0)

# wait for START
for iLoop in range(0, maxLoop):
    sleep(tempo)
    for iValve in range(firstValve, lastValve+1):
        caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-CO', open)
    for x in range(int(init), int(open)+1):
        for iValve in range(firstValve, lastValve+1):
            caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FB', x)
            extra = [ 'CTRL-SUP-BOY:VC'+ str(iValve) + '-FOMD'
                    , 'CTRL-SUP-BOY:VC'+ str(iValve) + '-LOMD'
                    , 'CTRL-SUP-BOY:VC'+ str(iValve) + '-MAMD'
                    , 'CTRL-SUP-BOY:VC'+ str(iValve) + '-AUMD']
            output = (0 if caget(extra[x % len(extra)]) else 1)
            caput(extra[x % len(extra)], output)
        sleep(scan)
    sleep(tempo)
    for iValve in range(firstValve, lastValve+1):
        caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-CO', close)
    for x in range(int(open), int(close)-1, -1):
        for iValve in range(firstValve, lastValve+1):
            caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FB', x)
            extra = [ 'CTRL-SUP-BOY:VC'+ str(iValve) + '-INTLK'
                    , 'CTRL-SUP-BOY:VC'+ str(iValve) + '-TRIP'
                    , 'CTRL-SUP-BOY:VC'+ str(iValve) + '-IOSIM'
                    , 'CTRL-SUP-BOY:VC'+ str(iValve) + '-IOERR']
            output = (0 if caget(extra[x % len(extra)]) else 1)
            caput(extra[x % len(extra)], output)
        sleep(scan)
| epl-1.0 | -6,816,675,242,629,080,000 | 38.352113 | 65 | 0.581246 | false | 2.60149 | false | false | false |
FAIMS/FAIMS-Tools | generators/christian/util/xml.py | 1 | 5339 | from lxml import etree
import hashlib
import re
from consts import *
def parseXml(filename):
parser = etree.XMLParser(strip_cdata=False)
try:
tree = etree.parse(filename, parser)
except etree.XMLSyntaxError as e:
print e
exit()
tree = tree.getroot()
return tree
def hasAttrib(e, a):
try:
return a in e.attrib
except:
return False
def deleteAttribFromTree(t, attrib):
if t == None:
return
if hasattr(t, 'attrib') and attrib in t.attrib:
del t.attrib[attrib]
for e in t:
deleteAttribFromTree(e, attrib)
def getAttribVal(node, attribName):
if hasattr(node, 'attrib') and attribName in node.attrib:
return node.attrib[attribName]
return None
def appendToAttrib(node, attribName, attribVal):
'''
Appends the string `attribVal` to the attribute `attribName` in `node`.
`node` is an lxml Element.
'''
oldAttribVal = getAttribVal(node, attribName) or ''
if not attribVal:
return
if attribVal in oldAttribVal.split():
return
if oldAttribVal: newAttribVal = oldAttribVal + SEP_FLAGS + attribVal
else: newAttribVal = attribVal
node.attrib[attribName] = newAttribVal
def setSourceline(t, sourceline):
if t == None:
return
t.sourceline = sourceline
for e in t:
setSourceline(e, sourceline)
def getAll(node, keep=None, descendantOrSelf=True):
'''
Returns the list of nodes which are the descendants of `node`. Optionally,
`node` can be included in the list.
'''
keepIsNone = keep == None
keepIsFunction = hasattr(keep, '__call__')
assert keepIsNone or keepIsFunction
# Get all nodes
if descendantOrSelf: all = node.xpath('.//*')
else: all = node.xpath(' //*')
# Keep nodes in `all` for which keep(node) evaluates to true
if keep:
all = filter(keep, all)
return all
def appendNotNone(src, dst):
if src == None:
return
dst.append(src)
def extendFlatly(node, children):
'''
Acts like `lxml.etree._Element.extend`, except it flattens the list of
`children`. For example, calling `extendFlatly(node, [a, [b, c]])` is
equivalent to `node.extend([a, b, c])` if `node`, `a`, `b` and `c` are
instances of `lxml.etree._Element`.
'''
listTypes = [list, tuple]
okTypes = listTypes + [etree._Element]
assert type(children) in okTypes
if node is None:
return
if type(children) == etree._Element:
children = [children]
for child in children:
if type(child) == etree._Element: node.append(child)
if type(child) in listTypes: node.extend(child)
def flagAll(nodes, attrib, value):
for n in nodes:
n.attrib[attrib] = value
def getIndex(node):
parent = node.getparent()
if parent == None: return 0
else: return parent.index(node)
def getPath(node):
'''
Returns a list of strings representing the ancestors of `node`, plus `node`
itself. The strings are the ancestors' tag names. For example, if `node` is
the lxml element `<My_ID/>` from a module.xml file which contains the
following:
<My_Tab_Group>
<My_Tab>
<My_ID/>
</My_Tab>
</My_Tab_Group>
then getPath(node) returns ['My_Tab', 'My_ID'] (the root element itself is not
included in the path).
'''
if node == None:
return []
if node is node.getroottree().getroot():
return []
return getPath(node.getparent()) + [node.tag]
def getPathString(node, sep='/'):
return sep.join(getPath(node))
def getPathIndex(node):
nodes = getPath(node)
return [str(getIndex(n)) for n in nodes]
def getPathIndexString(node, sep='/'):
return sep.join(getPathIndex(node))
def nodeHash(node, hashLen=10):
path = getPathString(node)
hash = hashlib.sha256(path)
hash = hash.hexdigest()
hash = hash[:hashLen]
return hash
def treeHash(node):
s = etree.tostring(
node,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'
)
hash = hashlib.sha256(s)
hash = hash.hexdigest()
return hash
def replaceElement(element, replacements):
if replacements is None:
return
# Canonicalise input
if type(replacements) in (list, tuple):
container = etree.Element('container')
for r in replacements:
container.append(r)
replacements = container
# Insert each element in `replacements` at the location of `element`. The
# phrasing is a bit opaque here because lxml *moves* nodes from
# `replacements` instead of copying them, when `insert(index, r)` is called.
returnVal = []
index = element.getparent().index(element) # Index of `element`
while len(replacements):
r = replacements[-1]
element.getparent().insert(index, r)
returnVal.append(r)
element.getparent().remove(element)
return returnVal
def insertAfter(node, nodeToInsert):
'''
Inserts `nodeToInsert` immediately after `node`.
'''
index = node.getparent().index(node) # Index of `node`
node.getparent().insert(index+1, nodeToInsert)
def insertBefore(node, nodeToInsert):
index = node.getparent().index(node) # Index of `node`
node.getparent().insert(index, nodeToInsert)
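
# Hedged usage sketch (not part of the original module); it builds a small tree
# in memory instead of parsing a module.xml file from disk.
if __name__ == '__main__':
    root = etree.fromstring('<My_Tab_Group><My_Tab><My_ID/></My_Tab></My_Tab_Group>')
    node = root.xpath('//My_ID')[0]
    print getPathString(node)   # My_Tab/My_ID
    print nodeHash(node)        # first 10 hex chars of the sha256 of that path
    insertAfter(node, etree.Element('My_New_ID'))
    print etree.tostring(root)  # My_Tab now contains My_ID followed by My_New_ID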
| gpl-2.0 | 8,943,742,031,703,865,000 | 26.101523 | 80 | 0.62596 | false | 3.699931 | false | false | false |
vdmann/cse-360-image-hosting-website | src/dragdrop/files.py | 1 | 2581 | # from django.contrib.contenttypes.models import ContentType
# from django.contrib.auth.models import User
# from django.db import models
# # am I importing these session libraries correctly?
# from django.contrib.sessions.models import Session
# from django.contrib.sessions.backends.cached_db import SessionStore
# from django.contrib.sessions.backends.db import SessionStore
# from django.conf import settings
# import random, string
# from importlib import import_module
# def get_path(instance, filename):
# ctype = ContentType.objects.get_for_model(instance)
# model = ctype.model
# app = ctype.app_label
# extension = filename.split('.')[-1]
# ############################################################################
# # string for the session id
# # s = request.session._session_key
# # request.user.username
# # user_print_test = models.IntegerField(User)
# # user_print_test = models.ForeignKey(User, unique=True)
# # print "in files.py this is the user_print_test value: %s" % user_print_test
# # using session outside of the views
# # SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
# # this randomly creates a sessionid everytime
# # s = SessionStore()
# # s.save()
# # s.session_key
# # print "in files.py this is the s.session_key value: %s" % s.session_key
# # get_session_key = s.session_key
# # session_var = Session.objects.get(pk=s.session_key).get_decoded()
# # print "in files.py this is the s.session_var value: %s" % session_var
# # this does not work
# # user_get_id = User.objects.get(id = session_var['_auth_user_id'])
# # print "this is the session_key value: %s" % user_get_id
# ############################################################################
# # modified code
# # dir = get_session_key
# # original code
# dir = "site"
# # if model == "job":
# # dir += "/pdf/job_attachment"
# # else:
# # dir += "/img/%s" % app
# # if model == "image_type_1":
# # dir += "/type1/%s" % instance.category
# # elif model == "image_type_2":
# # dir += "/type2"
# # elif model == "restaurant":
# # dir += "/logo"
# # else:
# # dir += "/%s" % model
# chars = string.letters + string.digits
# name = string.join(random.sample(chars, 8), '')
# # original code
# # return "%s/%s/%s.%s" % (dir, name, extension)
# return "%s/%s.%s" % (dir, filename, extension) | mit | 6,365,021,279,501,099,000 | 34.369863 | 83 | 0.566447 | false | 3.3827 | false | false | false |
klahnakoski/ActiveData-ETL | vendor/jx_base/expressions/es_nested_op.py | 4 | 1491 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
"""
# NOTE:
THE self.lang[operator] PATTERN IS CASTING NEW OPERATORS TO OWN LANGUAGE;
KEEPING Python AS Python, ES FILTERS AS ES FILTERS, AND Painless AS
Painless. WE COULD COPY partial_eval(), AND OTHERS, TO THEIR RESPECTIVE
LANGUAGE, BUT WE KEEP CODE HERE SO THERE IS LESS OF IT
"""
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import simplified
from jx_base.expressions.expression import Expression
from jx_base.language import is_op
from mo_json import BOOLEAN
class EsNestedOp(Expression):
    data_type = BOOLEAN
    has_simple_form = False

    def __init__(self, terms):
        Expression.__init__(self, terms)
        self.path, self.query = terms

    @simplified
    def partial_eval(self):
        if self.path.var == ".":
            return self.query.partial_eval()
        return self.lang[
            EsNestedOp("es.nested", [self.path, self.query.partial_eval()])
        ]

    def __data__(self):
        return {"es.nested": {self.path.var: self.query.__data__()}}

    def __eq__(self, other):
        if is_op(other, EsNestedOp):
            return self.path.var == other.path.var and self.query == other.query
        return False
| mpl-2.0 | 1,376,558,340,162,270,200 | 28.82 | 80 | 0.668008 | false | 3.343049 | false | false | false |
ua-snap/downscale | examples.py | 1 | 2325 | # # # #
# Examples of how to run downscaling with this new package.
# this is hardwired junk unless you are on the SNAP servers at UAF.
# # # #
# AR5
if __name__ == '__main__':
    # import modules
    import downscale

    # minimum required arguments
    ar5_modeled = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/IPSL-CM5A-LR/clt/clt_Amon_IPSL-CM5A-LR_rcp26_r1i1p1_200601_210012.nc'
    ar5_historical = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/IPSL-CM5A-LR/clt/clt_Amon_IPSL-CM5A-LR_historical_r1i1p1_185001_200512.nc'
    clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
    template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
    base_path = '/atlas_scratch/malindgren/CMIP5/TEST_AR5'

    # run
    # down = DownscaleAR5.DownscaleAR5( ar5_modeled, ar5_historical, base_path, clim_path, template_raster_fn=template_raster_fn, ncores=32 ) #, climatology_begin, climatology_end, plev, absolute, metric, ncores )
    # output = down.downscale_ar5_ts()
    down = downscale.Dataset( ar5_modeled, ar5_historical, base_path, clim_path, template_raster_fn=template_raster_fn, ncores=32 ) #, climatology_begin, climatology_end, plev, absolute, metric, ncores )
    output = down.downscale_ar5_ts()


# CRU
if __name__ == '__main__':
    # import modules
    from downscale import DownscaleCRU

    # example of post_downscale_function - pass in at DownscaleCRU()
    def clamp_vals( x ):
        ''' clamp the values following the relative humidity downscaling '''
        x[ (x > 100) & (x < 500) ] = 95
        return x

    # minimum required arguments
    cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.cld.dat.nc'
    clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
    template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
    base_path = '/atlas_scratch/malindgren/CMIP5/CRU2'

    # run
    down = DownscaleCRU.DownscaleCRU( cru_ts, clim_path, template_raster_fn, base_path, absolute=False, ncores=32 )
    output = down.downscale_cru_ts()
| mit | -3,292,598,839,910,450,700 | 48.468085 | 210 | 0.742796 | false | 2.543764 | false | false | false |
cardmagic/PyAMF | pyamf/adapters/_elixir.py | 1 | 1209 | # Copyright (c) 2007-2010 The PyAMF Project.
# See LICENSE for details.
"""
Elixir adapter module. Elixir adds a number of properties to the mapped instances.
@see: U{Elixir homepage (external)<http://elixir.ematia.de>}
@since: 0.6
"""
import elixir.entity
import pyamf
from pyamf import adapters
adapter = adapters.get_adapter('sqlalchemy.orm')
adapter.class_checkers.append(elixir.entity.is_entity)
class ElixirAdapter(adapter.SaMappedClassAlias):
    EXCLUDED_ATTRS = adapter.SaMappedClassAlias.EXCLUDED_ATTRS + [
        '_global_session']

    def getCustomProperties(self):
        adapter.SaMappedClassAlias.getCustomProperties(self)

        self.descriptor = self.klass._descriptor
        self.parent_descriptor = None

        if self.descriptor.parent:
            self.parent_descriptor = self.descriptor.parent._descriptor

        if self.descriptor.polymorphic:
            self.exclude_attrs.update([self.descriptor.polymorphic])

    def _compile_base_class(self, klass):
        if klass is elixir.EntityBase or klass is elixir.Entity:
            return

        pyamf.ClassAlias._compile_base_class(self, klass)
pyamf.register_alias_type(ElixirAdapter, elixir.entity.is_entity) | mit | 621,109,237,464,353,300 | 25.888889 | 82 | 0.717122 | false | 3.524781 | false | false | false |
teodoran/TMM-4540 | sublime-packages/AMLRepl/AMLRepl.py | 1 | 6253 | import sublime
import sublime_plugin
import subprocess
import time
import webbrowser
from threading import Thread
aml_manual_path = ""
aml_start_path = ""
aml_batch_file = ""
repl_process = None
output_lines = []
output_view = None
class AmlReplCommand(sublime_plugin.TextCommand):
def PrintStdout(self, edit, process):
global output_lines
while process.poll() is None:
output = process.stdout.readline()
output_lines.append(output)
def run(self, edit):
self.view.set_name("AML REPL")
self.view.set_syntax_file("Packages/AML/Aml.tmLanguage")
global aml_manual_path, aml_start_path, aml_batch_file
settings = sublime.load_settings("AMLRepl.sublime-settings")
aml_manual_path = settings.get("aml_manual_path")
aml_start_path = settings.get("aml_start_path")
aml_batch_file = settings.get("aml_batch_file")
global repl_process
repl_process = subprocess.Popen(
aml_batch_file, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=aml_start_path)
global output_view
output_view = self.view
stdout_thread = Thread(
target=self.PrintStdout, args=[edit, repl_process])
stdout_thread.setDaemon(True)
stdout_thread.start()
counter = 0
while (not 'nil\r\n' in output_lines) and counter < 100:
time.sleep(0.1)
counter += 1
self.view.run_command('output_lines')
class WindowEventCommand(sublime_plugin.EventListener):
def on_close(self, view):
global repl_process
if repl_process:
repl_process.stdin.write("(quit)\n")
repl_process.terminate()
repl_process = None
class ReplQuitCommand(sublime_plugin.TextCommand):
def run(self, edit):
global repl_process
if repl_process:
output_view.run_command('output_lines')
repl_process.stdin.write("(quit)\n")
repl_process.terminate()
repl_process = None
output_view.insert(
edit, output_view.size(), "AML process terminated. Bye :-)")
class ReplEvalCommand(sublime_plugin.TextCommand):
def last_sexp(self, string):
sexp, bracet_count, bracket_match, done = "", 0, 0, 0
for c in reversed(string):
if c == ')':
bracket_match += 1
bracet_count += 1
elif c == '(':
bracket_match -= 1
bracet_count += 1
if done == 0 and bracet_count > 0:
sexp = c + sexp
elif done == 1 and c == '\'':
sexp = c + sexp
elif done > 1:
break
if bracet_count > 1 and bracket_match == 0:
done += 1
return sexp
def run(self, edit):
global repl_process
if repl_process:
input_substr = None
position = self.view.sel()[0]
if position.begin() == position.end():
input_substr = self.last_sexp(
self.view.substr(sublime.Region(0, self.view.size())))
else:
input_substr = self.view.substr(
sublime.Region(position.begin(), position.end()))
output_view.insert(edit, self.view.size(), "\n")
repl_process.stdin.write("%s\n" % input_substr)
output_view.run_command('output_lines')
else:
output_view.insert(
edit, output_view.size(), "No AML process initialized. Please restart AMLRepl.\n")
class FileEvalCommand(sublime_plugin.TextCommand):
def run(self, edit):
global repl_process
global output_view
if repl_process:
input_substr = self.view.substr(
sublime.Region(0, self.view.size()))
repl_process.stdin.write("%s\n" % input_substr)
output_view.run_command('output_lines')
else:
output_view.insert(
edit, self.view.size(), "No AML process initialized. Please restart AMLRepl.\n")
class OutputLinesCommand(sublime_plugin.TextCommand):
def run(self, edit):
global output_lines
counter = 0
while output_lines == [] and counter < 10:
time.sleep(0.1)
counter += 1
for line in output_lines:
self.view.insert(edit, self.view.size(), line)
self.view.run_command("goto_line", {"line": self.view.size()})
output_lines = []
class AmlReferenceManualCommand(sublime_plugin.TextCommand):
def run(self, edit):
global aml_manual_path
url = "file:///" + aml_manual_path + "index.html"
webbrowser.open_new(url)
class AmlGuiCommand(sublime_plugin.TextCommand):
def run(self, edit):
if repl_process:
output_view.insert(edit, self.view.size(), "\n")
repl_process.stdin.write("%s\n" % "(aml)")
output_view.run_command('output_lines')
else:
output_view.insert(
edit, output_view.size(), "No AML process initialized. Please restart AMLRepl.\n")
class AunitGuiCommand(sublime_plugin.TextCommand):
def run(self, edit):
if repl_process:
output_view.insert(edit, self.view.size(), "\n")
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-core-system)")
output_view.run_command('output_lines')
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-print-system)")
output_view.run_command('output_lines')
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-gui-system)")
output_view.run_command('output_lines')
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-main-system)")
output_view.run_command('output_lines')
repl_process.stdin.write("%s\n" % "(aunit)")
output_view.run_command('output_lines')
else:
output_view.insert(
edit, output_view.size(), "No AML process initialized. Please restart AMLRepl.\n")
| bsd-2-clause | 8,199,867,089,837,572,000 | 28.635071 | 106 | 0.568047 | false | 3.845633 | false | false | false |
dsoprea/PyZap | setup.py | 1 | 1578 | from setuptools import setup, find_packages
from setuptools.command.install import install
def _pre_install():
print("Verifying that the library is accessible.")
import sys
import os.path
dev_path = os.path.dirname(__file__)
sys.path.insert(0, dev_path)
try:
import pyzap.library
except OSError as e:
print("Library can not be loaded: %s" % (str(e)))
raise
class _custom_install(install):
def run(self):
_pre_install()
install.run(self)
description = "Python wrapper library for ZapLib digital television (DVB) " \
"tuning library."
long_description = """\
This library allows a Python script to tune channels with nothing more than the
ZapLib library, and the correct tuning values for the type of DVB that you're
trying to decode. No channels.conf file is required.
"""
setup(name='pyzap',
version='0.3.1',
description=description,
long_description=long_description,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Topic :: Multimedia :: Video :: Capture',
],
keywords='dvb dvb-a dvb-c dvb-s dvb-t dvb-apps television cable',
author='Dustin Oprea',
author_email='[email protected]',
url='https://github.com/dsoprea/PyZap',
license='GPL 2',
packages=find_packages(exclude=[]),
include_package_data=True,
zip_safe=True,
cmdclass={ 'install': _custom_install },
)
| gpl-2.0 | -4,752,078,199,824,638,000 | 29.346154 | 80 | 0.648923 | false | 3.76611 | false | false | false |
smarkets/marge-bot | marge/interval.py | 1 | 4396 | import operator
from enum import Enum, unique
import maya
# pylint: disable=invalid-name
@unique
class WeekDay(Enum):
Monday = 0
Tuesday = 1
Wednesday = 2
Thursday = 3
Friday = 4
Saturday = 5
Sunday = 6
_DAY_NAMES = {day.name.lower(): day for day in WeekDay}
_DAY_NAMES.update((day.name.lower()[:3], day) for day in WeekDay)
_DAY_NAMES.update((day, day) for day in WeekDay)
def find_weekday(string_or_day):
if isinstance(string_or_day, WeekDay):
return string_or_day
if isinstance(string_or_day, str):
return _DAY_NAMES[string_or_day.lower()]
raise ValueError('Not a week day: %r' % string_or_day)
class WeeklyInterval:
def __init__(self, from_weekday, from_time, to_weekday, to_time):
from_weekday = find_weekday(from_weekday)
to_weekday = find_weekday(to_weekday)
# the class invariant is that from_weekday <= to_weekday; so when this
# is not the case (e.g. a Fri-Mon interval), we store the complement interval
# (in the example, Mon-Fri), and invert the criterion
self._is_complement_interval = from_weekday.value > to_weekday.value
if self._is_complement_interval:
self._from_weekday = to_weekday
self._from_time = to_time
self._to_weekday = from_weekday
self._to_time = from_time
else:
self._from_weekday = from_weekday
self._from_time = from_time
self._to_weekday = to_weekday
self._to_time = to_time
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
pat = '{class_name}({from_weekday}, {from_time}, {to_weekday}, {to_time})'
if self._is_complement_interval:
return pat.format(
class_name=self.__class__.__name__,
from_weekday=self._to_weekday,
from_time=self._to_time,
to_weekday=self._from_weekday,
to_time=self._from_time,
)
return pat.format(
class_name=self.__class__.__name__,
from_weekday=self._from_weekday,
from_time=self._from_time,
to_weekday=self._to_weekday,
to_time=self._to_time,
)
@classmethod
def from_human(cls, string):
from_, to_ = string.split('-')
def parse_part(part):
part = part.replace('@', ' ')
parts = part.split()
weekday = parts[0]
time = parts[1]
timezone = parts[2] if len(parts) > 2 else 'UTC'
weekday = find_weekday(weekday)
time = maya.parse(time, timezone=timezone).datetime().time()
return weekday, time
from_weekday, from_time = parse_part(from_)
to_weekday, to_time = parse_part(to_)
return cls(from_weekday, from_time, to_weekday, to_time)
def covers(self, date):
return self._interval_covers(date) != self._is_complement_interval
def _interval_covers(self, date):
weekday = date.date().weekday()
time = date.time()
before = operator.le if self._is_complement_interval else operator.lt
if not self._from_weekday.value <= weekday <= self._to_weekday.value:
return False
if self._from_weekday.value == weekday and before(time, self._from_time):
return False
if self._to_weekday.value == weekday and before(self._to_time, time):
return False
return True
class IntervalUnion:
def __init__(self, iterable):
self._intervals = list(iterable)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return '{o.__class__.__name__}({o._intervals})'.format(o=self)
@classmethod
def empty(cls):
return cls(())
@classmethod
def from_human(cls, string):
strings = string.split(',')
return cls(WeeklyInterval.from_human(s) for s in strings)
def covers(self, date):
return any(interval.covers(date) for interval in self._intervals)
| bsd-3-clause | -4,779,240,821,705,119,000 | 29.741259 | 85 | 0.574158 | false | 3.802768 | false | false | false |
socketpair/asynqp | src/asynqp/serialisation.py | 3 | 8653 | import struct
from .exceptions import AMQPError
from datetime import datetime, timezone
def rethrow_as(expected_cls, to_throw):
def decorator(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except expected_cls as e:
raise to_throw from e
return wrapper
return decorator
###########################################################
# Deserialisation
###########################################################
@rethrow_as(struct.error, AMQPError('failed to read an octet'))
def read_octet(stream):
return _read_octet(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a short'))
def read_short(stream):
return _read_short(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read an unsigned short'))
def read_unsigned_short(stream):
return _read_unsigned_short(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a long'))
def read_long(stream):
return _read_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read an unsigned long'))
def read_unsigned_long(stream):
return _read_unsigned_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a long long'))
def read_long_long(stream):
return _read_long_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read an unsigned long long'))
def read_unsigned_long_long(stream):
return _read_unsigned_long_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a short string'))
def read_short_string(stream):
return _read_short_string(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a long string'))
def read_long_string(stream):
return _read_long_string(stream)[0]
@rethrow_as(KeyError, AMQPError('failed to read a table'))
@rethrow_as(struct.error, AMQPError('failed to read a table'))
def read_table(stream):
return _read_table(stream)[0]
@rethrow_as(KeyError, AMQPError('failed to read an array'))
@rethrow_as(struct.error, AMQPError('failed to read an array'))
def read_array(stream):
return _read_array(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_bool(stream):
return _read_bool(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_bools(byte, number_of_bools):
bits = "{0:b}".format(byte)
bits = "0" * (number_of_bools - len(bits)) + bits
return (b == "1" for b in reversed(bits))
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_timestamp(stream):
return _read_timestamp(stream)[0]
def qpid_rabbit_mq_table():
# TODO: fix amqp 0.9.1 compatibility
# TODO: Add missing types
TABLE_VALUE_PARSERS = {
b't': _read_bool,
b'b': _read_signed_byte,
b's': _read_short,
b'I': _read_long,
b'l': _read_long_long,
b'f': _read_float,
b'S': _read_long_string,
b'A': _read_array,
b'V': _read_void,
b'x': _read_byte_array,
b'F': _read_table,
b'T': _read_timestamp
}
return TABLE_VALUE_PARSERS
def _read_table(stream):
TABLE_VALUE_PARSERS = qpid_rabbit_mq_table()
table = {}
table_length, initial_long_size = _read_unsigned_long(stream)
consumed = initial_long_size
while consumed < table_length + initial_long_size:
key, x = _read_short_string(stream)
consumed += x
value_type_code = stream.read(1)
consumed += 1
value, x = TABLE_VALUE_PARSERS[value_type_code](stream)
consumed += x
table[key] = value
return table, consumed
def _read_short_string(stream):
str_length, x = _read_octet(stream)
string = stream.read(str_length).decode('utf-8')
return string, x + str_length
def _read_long_string(stream):
str_length, x = _read_unsigned_long(stream)
buffer = stream.read(str_length)
if len(buffer) != str_length:
raise AMQPError("Long string had incorrect length")
return buffer.decode('utf-8'), x + str_length
def _read_octet(stream):
x, = struct.unpack('!B', stream.read(1))
return x, 1
def _read_signed_byte(stream):
x, = struct.unpack_from('!b', stream.read(1))
return x, 1
def _read_bool(stream):
x, = struct.unpack('!?', stream.read(1))
return x, 1
def _read_short(stream):
x, = struct.unpack('!h', stream.read(2))
return x, 2
def _read_unsigned_short(stream):
x, = struct.unpack('!H', stream.read(2))
return x, 2
def _read_long(stream):
x, = struct.unpack('!l', stream.read(4))
return x, 4
def _read_unsigned_long(stream):
x, = struct.unpack('!L', stream.read(4))
return x, 4
def _read_long_long(stream):
x, = struct.unpack('!q', stream.read(8))
return x, 8
def _read_unsigned_long_long(stream):
x, = struct.unpack('!Q', stream.read(8))
return x, 8
def _read_float(stream):
x, = struct.unpack('!f', stream.read(4))
return x, 4
def _read_timestamp(stream):
x, = struct.unpack('!Q', stream.read(8))
# From datetime.fromutctimestamp converts it to a local timestamp without timezone information
return datetime.fromtimestamp(x * 1e-3, timezone.utc), 8
def _read_array(stream):
TABLE_VALUE_PARSERS = qpid_rabbit_mq_table()
field_array = []
# The standard says only long, but unsigned long seems sensible
array_length, initial_long_size = _read_unsigned_long(stream)
consumed = initial_long_size
while consumed < array_length + initial_long_size:
value_type_code = stream.read(1)
consumed += 1
value, x = TABLE_VALUE_PARSERS[value_type_code](stream)
consumed += x
field_array.append(value)
return field_array, consumed
def _read_void(stream):
return None, 0
def _read_byte_array(stream):
byte_array_length, x = _read_unsigned_long(stream)
return stream.read(byte_array_length), byte_array_length + x
###########################################################
# Serialisation
###########################################################
def pack_short_string(string):
buffer = string.encode('utf-8')
return pack_octet(len(buffer)) + buffer
def pack_long_string(string):
buffer = string.encode('utf-8')
return pack_unsigned_long(len(buffer)) + buffer
def pack_field_value(value):
if value is None:
return b'V'
if isinstance(value, bool):
return b't' + pack_bool(value)
if isinstance(value, dict):
return b'F' + pack_table(value)
if isinstance(value, list):
return b'A' + pack_array(value)
if isinstance(value, bytes):
return b'x' + pack_byte_array(value)
if isinstance(value, str):
return b'S' + pack_long_string(value)
if isinstance(value, datetime):
return b'T' + pack_timestamp(value)
if isinstance(value, int):
if value.bit_length() < 8:
return b'b' + pack_signed_byte(value)
if value.bit_length() < 32:
return b'I' + pack_long(value)
if isinstance(value, float):
return b'f' + pack_float(value)
raise NotImplementedError()
def pack_table(d):
buffer = b''
for key, value in d.items():
buffer += pack_short_string(key)
# todo: more values
buffer += pack_field_value(value)
return pack_unsigned_long(len(buffer)) + buffer
def pack_octet(number):
return struct.pack('!B', number)
def pack_signed_byte(number):
return struct.pack('!b', number)
def pack_unsigned_byte(number):
return struct.pack('!B', number)
def pack_short(number):
return struct.pack('!h', number)
def pack_unsigned_short(number):
return struct.pack('!H', number)
def pack_long(number):
return struct.pack('!l', number)
def pack_unsigned_long(number):
return struct.pack('!L', number)
def pack_long_long(number):
return struct.pack('!q', number)
def pack_unsigned_long_long(number):
return struct.pack('!Q', number)
def pack_float(number):
return struct.pack('!f', number)
def pack_bool(b):
return struct.pack('!?', b)
def pack_timestamp(timeval):
number = int(timeval.timestamp() * 1e3)
return struct.pack('!Q', number)
def pack_byte_array(value):
buffer = pack_unsigned_long(len(value))
buffer += value
return buffer
def pack_array(items):
buffer = b''
for value in items:
buffer += pack_field_value(value)
return pack_unsigned_long(len(buffer)) + buffer
def pack_bools(*bs):
tot = 0
for n, b in enumerate(bs):
x = 1 if b else 0
tot += (x << n)
return pack_octet(tot)
| mit | 2,299,834,258,204,521,500 | 23.582386 | 98 | 0.622905 | false | 3.310252 | false | false | false |
patta42/pySICM | scanmodes/readVIn.py | 2 | 7823 | # Copyright (C) 2015 Patrick Happel <[email protected]>
#
# This file is part of pySICM.
#
# pySICM is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later
# version.
#
# pySICM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pySICM. If not, see <http://www.gnu.org/licenses/>.
import pySICM.sicm
from twisted.internet import defer, reactor
import struct
import pycomedi.device as Device
import pycomedi.subdevice as Subdevice
import pycomedi.channel as Channel
import pycomedi.constant as CONSTANTS
import pycomedi.utility as Util
import numpy as np
import time
class MyCallbackReader(Util.Reader):
def __init__(self, callback=None, count=None, **kwargs):
self.callback = callback
self.count = count
super(MyCallbackReader, self).__init__(**kwargs)
def start(self):
super(MyCallbackReader,self).start()
if self.callback:
self.callback(self.buffer)
def run(self):
count = self.count
block_while_running = self.block_while_running
while count is None or count > 0:
if count is not None:
count -= 1
try:
self.block_while_running = False
super(MyCallbackReader, self).run()
finally:
self.block_while_running = block_while_running
if self.block_while_running:
self.block()
def isAlive(self):
return super(MyCallbackReader,self,).isAlive()
class ReadVIn (pySICM.sicm._SICMMeasurement):
def __init__(self):
super(ReadVIn, self).__init__()
self._setRequired('InputSignal', 1, pySICM.sicm.InputSignal)
self._setRequiredOptions('ReadVIn.InputSignal', 1, int, 'Id of the InputSignal to use. (int)')
self._setRequired('Samples', 1, int)
self._setRequiredOptions('ReadVIn.Samples', 1, int, 'Number of samples to be read in one run. (int)')
self._setRequired('Duration', 1, int)
self._setRequiredOptions('ReadVIn.Duration', 1, int, 'Duration of one run in milliseconds. (int)')
self._setRequiredOptions('ReadVIn.Loop', 1, bool, 'Loop infinitely? (bool)')
self._setRequired('Loop', 1, bool)
self.stop = False
self.device = None
self.ai = None
self.channel = None
self.runs=0
self.reader=None
def checkAndSetConfig(self, settings):
if 'mode' not in settings:
return False
if settings['mode'] != 'readVIn':
return False
if 'readVIn' not in settings:
return False
rsettings = settings['readVIn']
if 'Duration' not in rsettings:
return False
self.setConfig('Duration', int(rsettings['Duration']))
if 'Samples' not in rsettings:
return False
self.setConfig('Samples', int(rsettings['Samples']))
if 'Loop' not in rsettings:
return False
self.setConfig('Loop', bool(int(rsettings['Loop'])))
return True
def fake(self, settings, writeResponse):
self.writeResponse = writeResponse
self.stop = False
self.runs=0;
if self.checkAndSetConfig(settings):
self.nextFakeDataPoint('')
else:
self.writeResponse("NACK\r\n")
def generateFakeDeferred(self):
self.d = defer.Deferred()
self.d.addCallback(self.writeResponse)
self.d.addCallback(self.nextFakeDataPoint)
def generateDeferred(self):
self.d = defer.Deferred()
self.d.addCallback(self.writeResponse)
self.d.addCallback(self.nextDataPoint)
def checkData(self):
pass
def nextFakeDataPoint(self, args):
self.runs=self.runs+1
self.data = []
global reactor
self.d = None
self.generateFakeDeferred()
if (self.getConfig('Loop') and self.stop is False) or self.runs == 1:
self.call = reactor.callLater(0,self._fake)
else:
self.destroy()
def _fake(self):
time.sleep(float(self.getConfig('Duration'))*1e-3)
y = []
noise = np.random.normal(0,0.05,1024)
for i in xrange(self.getConfig('Samples')):
y.append(np.sin(np.pi*2*i/(self.getConfig('Samples')/4.0))+noise[i%1024])
y = y - np.min(y)
y = y / np.max(y)
y = y * np.iinfo(np.uint16).max
data = ""
for i in y:
data = data + self.mkByte(int(round(i)))
self.d.callback(data)
#return d
def mkByte(self, number):
# little endian
a = int(number / 256)
b = int(number % 256)
return struct.pack('B',b)+struct.pack('B',a)
def setStop(self):
self.stop = True
def scan(self, settings, writeResponse):
self.writeResponse = writeResponse
self.device = Device.Device('/dev/comedi0')
self.device.open()
self.ai = self.device.find_subdevice_by_type(
CONSTANTS.SUBDEVICE_TYPE.ai,
factory = Subdevice.StreamingSubdevice)
channel = self.ai.channel(
3,
factory = Channel.AnalogChannel,
aref = CONSTANTS.AREF.ground)
best_range = channel.find_range(
unit=CONSTANTS.UNIT.volt,
min = -5,
max = 5)
self.channel = self.ai.channel(
3,
factory = Channel.AnalogChannel,
aref = CONSTANTS.AREF.ground,
range = best_range)
if self.checkAndSetConfig(settings):
self.frequency = 1e3*(
float(self.getConfig('Samples'))/float(self.getConfig('Duration')))
command = self.ai.get_cmd_generic_timed(1, scan_period_ns=1e9/self.frequency)
command.chanlist = [self.channel]
command.stop_src = CONSTANTS.TRIG_SRC.count
command.stop_arg = self.getConfig('Samples')
self.command=command
buf = np.zeros(self.getConfig('Samples'), np.uint16)
self.nextDataPoint('');
def nextDataPoint(self, args):
self.runs=self.runs+1
self.ai.cmd = self.command
while self.ai.get_flags().busy and self.ai.get_flags().running:
time.sleep(.0001)
print "Sleeping..."
self.ai.cancel()
self.ai.command()
self.data = []
global reactor
self.d = None
self.generateDeferred()
if (self.getConfig('Loop') and self.stop is False) or self.runs == 1:
self.call = reactor.callLater(0,self._scan)
else:
self.destroy()
def dataMeasured(self, data):
# print "Runs is: %i"% self.runs
# print "Data Received: %i"%time.time()
print s#self.writeResponse(s)
# if self.reader.isAlive():
# self.reader.join()
# self.nextDataPoint()
def _scan(self):
buf = np.zeros(self.getConfig('Samples'),np.uint16);
reader = Util.Reader(self.ai, buf);
reader.start()
reader.join()
print "Length after reader joined: %i" % len(reader.buffer)
s = ''
for i in reader.buffer:
s = s + self.mkByte(i)
self.d.callback(s)
def destroy(self):
self.runs=0
super(ReadVIn, self).destroy()
self.device.close()
| gpl-3.0 | -549,848,359,958,129,540 | 32.865801 | 109 | 0.589927 | false | 3.801263 | true | false | false |
plilja/adventofcode | 2016/day25/day25.py | 1 | 4104 | import sys
class Machine():
def __init__(self, a, b, c, d, pc):
self.a = a
self.b = b
self.c = c
self.d = d
self.pc = pc
def __hash__(self):
return hash((self.a, self.b, self.c, self.d, self.pc))
def __eq__(self, other):
return (self.a, self.b, self.c, self.d, self.pc) == (other.a, other.b, other.c, other.d, other.pc)
def copy(self):
return Machine(self.a, self.b, self.c, self.d, self.pc)
def solve(inp):
i = 0
while True:
copied_list = inp[:]
m = Machine(i, 0, 0, 0, 0)
if execute(m, copied_list):
return i
i += 1
def execute(m, instructions):
visited = set()
signal = []
while m.pc < len(instructions):
xs = instructions[m.pc].strip().split()
if xs[0] == 'cpy':
cpy(m, xs[1], xs[2])
elif xs[0] == 'inc':
inc(m, xs[1])
elif xs[0] == 'dec':
dec(m, xs[1])
elif xs[0] == 'jnz':
jnz(m, xs[1], xs[2])
elif xs[0] == 'tgl':
tgl(m, xs[1], instructions)
elif xs[0] == 'out':
out(m, signal, xs[1])
if not valid_signal(signal):
return False
m_copy = m.copy()
if (m_copy, tuple(instructions)) in visited:
return True
visited |= {(m_copy, tuple(instructions))}
else:
raise ValueError('Unexpected instruction [%s]' % inp[0])
def valid_signal(signal):
if not signal:
return True
a = signal[0]
if a != 0 and a != 1:
return False
for b in signal[1:]:
if b != (a + 1) % 2:
return False
a = b
return True
def out(m, signal, unknown):
v = register_or_constant(m, unknown)
signal.append(v)
m.pc += 1
def register_or_constant(machine, unknown):
if is_constant(unknown):
return int(unknown)
else:
return get_register(machine, unknown)
def is_constant(unknown):
return unknown not in ('a', 'b', 'c', 'd')
def cpy(m, value, register):
v = register_or_constant(m, value)
if not is_constant(register):
set_register(m, register, v)
m.pc += 1
def inc(m, register):
if not is_constant(register):
v = get_register(m, register)
set_register(m, register, v + 1)
m.pc += 1
def dec(m, register):
if not is_constant(register):
v = get_register(m, register)
set_register(m, register, v - 1)
m.pc += 1
def jnz(m, value, delta):
v = register_or_constant(m, value)
d = register_or_constant(m, delta)
if v == 0:
m.pc += 1
else:
m.pc += int(d)
def tgl(m, unknown, instructions):
v = register_or_constant(m, unknown)
if m.pc + v >= 0 and m.pc + v < len(instructions):
instruction = instructions[m.pc + v].strip().split()
if len(instruction) == 2:
if instruction[0] == 'inc':
instructions[m.pc + v] = 'dec %s' % (instruction[1])
else:
instructions[m.pc + v] = 'inc %s' % (instruction[1])
else:
assert(len(instruction) == 3)
if instruction[0] == 'jnz':
instructions[m.pc + v] = 'cpy %s %s' % (instruction[1], instruction[2])
else:
instructions[m.pc + v] = 'jnz %s %s' % (instruction[1], instruction[2])
m.pc += 1
def get_register(machine, register):
if register == 'a':
return machine.a
if register == 'b':
return machine.b
if register == 'c':
return machine.c
if register == 'd':
return machine.d
raise ValueError('Unexpected register %s' % register)
def set_register(machine, register, value):
if register == 'a':
machine.a = value
elif register == 'b':
machine.b = value
elif register == 'c':
machine.c = value
elif register == 'd':
machine.d = value
else:
raise ValueError('Unexpected register %s' % register)
inp = sys.stdin.readlines()
print(solve(inp))
| gpl-3.0 | -8,875,775,524,525,946,000 | 23.57485 | 106 | 0.51462 | false | 3.355683 | false | false | false |
T3CHNOLOG1C/Zoidbot | addons/memes.py | 1 | 8729 | import discord
from discord.ext import commands
from sys import argv
class Memes:
"""
Meme commands
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
async def _meme(self, ctx, msg):
author = ctx.message.author
if ctx.message.channel.name[0:5] == "help-" or "assistance" in ctx.message.channel.name or (self.bot.nomemes_role in author.roles):
await self.bot.delete_message(ctx.message)
try:
await self.bot.send_message(author, "Meme commands are disabled in this channel, or your priviledges have been revoked.")
except discord.errors.Forbidden:
await self.bot.say(author.mention + " Meme commands are disabled in this channel, or your priviledges have been revoked.")
else:
await self.bot.say(self.bot.escape_name(ctx.message.author.display_name) + ": " + msg)
# list memes
@commands.command(name="listmemes", pass_context=True)
async def _listmemes(self, ctx):
"""List meme commands."""
# this feels wrong...
funcs = dir(self)
msg = "```\n"
msg += ", ".join(func for func in funcs if func != "bot" and func[0] != "_")
msg += "```"
await self._meme(ctx, msg)
# 3dshacks memes
@commands.command(pass_context=True, hidden=True)
async def s_99(self, ctx):
"""Memes."""
await self._meme(ctx, "**ALL HAIL BRITANNIA!**")
@commands.command(pass_context=True, hidden=True)
async def adrian1(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/j0Dkv2Z.png")
@commands.command(pass_context=True, hidden=True)
async def adrian2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/2SBC1Qo.jpg")
@commands.command(pass_context=True, hidden=True)
async def adrian3(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/EsDWK9U.png")
@commands.command(pass_context=True, hidden=True)
async def dubyadud(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/xesCnmM.jpg")
@commands.command(pass_context=True, hidden=True)
async def megumi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/GMRp1dj.jpg")
@commands.command(pass_context=True, hidden=True)
async def inori(self, ctx):
"""Memes."""
await self._meme(ctx, "https://i.imgur.com/WLncIsi.gif")
@commands.command(pass_context=True, hidden=True)
async def inori2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/V0uu99A.jpg")
@commands.command(pass_context=True, hidden=True)
async def inori3(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/so8thgu.gifv")
@commands.command(pass_context=True, hidden=True)
async def inori4(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/267IXh1.gif")
@commands.command(pass_context=True, hidden=True)
async def inori5(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lKcsiBP.png")
@commands.command(pass_context=True, hidden=True)
async def inori6(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/SIJzpau.gifv")
@commands.command(pass_context=True, hidden=True)
async def kina(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/8Mm5ZvB.jpg")
@commands.command(pass_context=True, hidden=True)
async def shotsfired(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/zf2XrNk.gifv")
@commands.command(pass_context=True, hidden=True)
async def rusure(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/dqh3fNi.png")
@commands.command(pass_context=True, hidden=True)
async def r34(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/sjQZKBF.gif")
@commands.command(pass_context=True, hidden=True)
async def lenny(self, ctx):
"""Memes."""
await self._meme(ctx, "( ͡° ͜ʖ ͡°)")
@commands.command(pass_context=True, hidden=True)
async def rip(self, ctx):
"""Memes."""
await self._meme(ctx, "Press F to pay respects.")
@commands.command(pass_context=True, hidden=True)
async def permabrocked(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ARsOh3p.jpg")
@commands.command(pass_context=True, hidden=True)
async def knp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/EsJ191C.png")
@commands.command(pass_context=True, hidden=True)
async def lucina(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/tnWSXf7.png")
@commands.command(pass_context=True, hidden=True)
async def lucina2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ZPMveve.jpg")
@commands.command(pass_context=True, hidden=True)
async def xarec(self, ctx):
"""Memes."""
await self._meme(ctx, "https://i.imgur.com/wRVuidH.gif")
@commands.command(pass_context=True, hidden=True)
async def xarec2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/A59RbRT.png")
@commands.command(pass_context=True, hidden=True)
async def clap(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/UYbIZYs.gifv")
@commands.command(pass_context=True, hidden=True)
async def ayyy(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/bgvuHAd.png")
@commands.command(pass_context=True, hidden=True)
async def hazel(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/vpu8bX3.png")
@commands.command(pass_context=True, hidden=True)
async def thumbsup(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/hki1IIs.gifv")
@commands.command(pass_context=True, hidden=True)
async def bigsmoke(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/vo5l6Fo.jpg\nALL YOU HAD TO DO WAS FOLLOW THE DAMN GUIDE CJ!")
@commands.command(pass_context=True, hidden=True)
async def bigorder(self, ctx):
"""Memes."""
await self._meme(ctx, "I’ll have two number 9s, a number 9 large, a number 6 with extra dip, a number 7, two number 45s, one with cheese, and a large soda.")
# Cute commands :3
@commands.command(pass_context=True, hidden=True)
async def headpat(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/7V6gIIW.jpg")
@commands.command(pass_context=True, hidden=True)
async def headpat2(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/djhHX0n.gifv")
@commands.command(pass_context=True, hidden=True)
async def sudoku(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/VHlIZRC.png")
@commands.command(pass_context=True, hidden=True)
async def rawr(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/Bqw4OwQ.png")
@commands.command(pass_context=True, hidden=True)
async def baka(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/OyjCHNe.png")
@commands.command(pass_context=True, hidden=True)
async def led(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/FYsxaUZ.jpg")
@commands.command(pass_context=True, hidden=True)
async def snickers(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/Ek0uDUn.jpg")
@commands.command(pass_context=True, hidden=True)
async def mugi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lw80tT0.gif")
@commands.command(pass_context=True, hidden=True)
async def rollsafe(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/n0xi1gZ.png")
@commands.command(pass_context=True, hidden=True)
async def lisp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/RQeZErU.png")
@commands.command(pass_context=True, hidden=True)
async def dev(self, ctx):
"""Reminds user where they are."""
await self.bot.say("You seem to be in <#196635781798952960>.")
# Load the extension
def setup(bot):
bot.add_cog(Memes(bot))
| apache-2.0 | -7,467,788,534,429,937,000 | 34.741803 | 165 | 0.603715 | false | 3.055711 | false | false | false |
feketemihai/l10n-romania | l10n_ro_account_bank_statement/wizard/account_statement_from_invoice.py | 2 | 3259 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
"""
Generate Entries by Statement from Invoices
"""
_inherit = "account.statement.from.invoice.lines"
def populate_statement(self, cr, uid, ids, context=None):
context = dict(context or {})
statement_id = context.get('statement_id', False)
if not statement_id:
return {'type': 'ir.actions.act_window_close'}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['line_ids']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
line_obj = self.pool.get('account.move.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
line_date = time.strftime('%Y-%m-%d')
statement = statement_obj.browse(
cr, uid, statement_id, context=context)
# for each selected move lines
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
# take the date for computation of currency => use payment date
ctx['date'] = line_date
amount = 0.0
if line.debit > 0:
amount = line.amount_residual
elif line.credit > 0:
amount = -line.amount_residual
if line.amount_currency:
amount = currency_obj.compute(cr, uid, line.currency_id.id,
statement.currency.id, -line.amount_residual_currency, context=ctx)
context.update({'move_line_ids': [line.id],
'invoice_id': line.invoice.id})
statement_line_obj.create(cr, uid, {
'name': line.name or '?',
'amount': amount,
'partner_id': line.partner_id.id,
'statement_id': statement_id,
'ref': line.ref,
'date': statement.date,
}, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,141,364,515,124,514,000 | 38.743902 | 113 | 0.573489 | false | 4.167519 | false | false | false |
mrmiguez/citrus | debug/FlaLD_DC.py | 1 | 9796 | #!/usr/bin/env python3
import re
import sys
import json
import requests
from lxml import etree
from pymods import OAIReader
from bs4 import BeautifulSoup
sys.path.append('../assets')
import assets
tn = {'name': 'sobek', 'prefix': 'http://dpanther.fiu.edu/sobek/content'}
nameSpace_default = { None: '{http://www.loc.gov/mods/v3}',
'oai_dc': '{http://www.openarchives.org/OAI/2.0/oai_dc/}',
'dc': '{http://purl.org/dc/elements/1.1/}',
'mods': '{http://www.loc.gov/mods/v3}',
'dcterms': '{http://purl.org/dc/terms/}',
'xlink': '{http://www.w3.org/1999/xlink}',
'repox': '{http://repox.ist.utl.pt}',
'oai_qdc': '{http://worldcat.org/xmlschemas/qdc-1.0/}'}
PROVIDER = 'FSU'
dprovide = 'FSU'
dc = nameSpace_default['dc']
VERBOSE = True
def write_json_ld(docs):
with open('testData/fiu-repoxfull.json', 'w') as jsonOutput:
json.dump(docs, jsonOutput, indent=2)
with open('test_data/fiu_bzs-1.xml', encoding='utf-8') as data_in:
records = OAIReader(data_in)
docs = []
for record in records:
if 'deleted' in record.attrib.keys():
if record.attrib['deleted'] == 'true':
pass
else:
oai_id = record.oai_urn
if VERBOSE:
print(oai_id)
# logging.debug(oai_id)
sourceResource = {}
# sourceResource.alternative
# sourceResource.collection
# sourceResource.contributor
if record.metadata.get_element('.//{0}contributor'.format(dc)):
sourceResource['contributor'] = [{"name": name}
for name in
record.metadata.get_element(
'.//{0}contributor'.format(dc),
delimiter=';')]
# sourceResource.creator
if record.metadata.get_element('.//{0}creator'.format(dc)):
sourceResource['creator'] = []
for name in record.metadata.get_element('.//{0}creator'.format(dc),
delimiter=';'):
# need to test for ( Contributor ) and ( contributor )
if (len(name) > 0) and ("ontributor )" not in name):
sourceResource['creator'].append({"name": name.strip(" ")})
elif "ontributor )" in name:
if 'contributor' not in sourceResource.keys():
sourceResource['contributor'] = []
sourceResource['contributor'].append({"name": name.strip(
" ").rstrip("( Contributor )").rstrip(
"( contributor )")})
else:
sourceResource['contributor'].append(
{"name": name.strip(" ").rstrip(
"( Contributor )").rstrip("( contributor )")})
# sourceResource.date
date = record.metadata.get_element('.//{0}date'.format(dc))
if date:
sourceResource['date'] = {"begin": date[0], "end": date[0]}
# sourceResource.description
if record.metadata.get_element('.//{0}description'.format(dc)):
sourceResource['description'] = record.metadata.get_element(
'.//{0}description'.format(dc), delimiter=';')
# sourceResource.extent
# sourceResource.format
if record.metadata.get_element('.//{0}format'.format(dc)):
sourceResource['format'] = record.metadata.get_element(
'.//{0}format'.format(dc))
# sourceResource.genre
# sourceResource.identifier
dPantherPURL = re.compile('dpService/dpPurlService/purl')
identifier = record.metadata.get_element('.//{0}identifier'.format(dc))
try:
for ID in identifier:
PURL = dPantherPURL.search(ID)
try:
PURL_match = PURL.string
except AttributeError as err:
# logging.warning(
# 'sourceResource.identifier: {0} - {1}'.format(err,
# oai_id))
print(err, oai_id)
pass
sourceResource['identifier'] = PURL_match
except TypeError as err:
# logging.warning(
# 'sourceResource.identifier: {0} - {1}'.format(err,
# oai_id))
print(err, oai_id)
pass
# if identifier is not None and len(identifier) > 1:
# sourceResource['identifier'] = []
# for ID in identifier:
# try:
# PURL = dPantherPURL.search(ID)
# if PURL:
# PURL_match = PURL.string
# else:
# sourceResource['identifier'].append(ID)
# except TypeError as err:
# # logging.warning(
# # 'sourceResource.identifier: {0} - {1}'.format(err,
# # oai_id))
# print(err, oai_id)
# pass
# else:
# sourceResource['identifier'] = identifier
# sourceResource.language
if record.metadata.get_element('.//{0}language'.format(dc)):
sourceResource['language'] = []
for element in record.metadata.get_element(
'.//{0}language'.format(dc), delimiter=';'):
if len(element) > 3:
sourceResource['language'].append({"name": element})
else:
sourceResource['language'].append({"iso_639_3": element})
# sourceResource.place : sourceResource['spatial']
if record.metadata.get_element('.//{0}coverage'.format(dc)):
sourceResource['spatial'] = [{'name': place}
for place in
record.metadata.get_element(
'.//{0}coverage'.format(dc))]
# sourceResource.publisher
if record.metadata.get_element('.//{0}publisher'.format(dc)):
sourceResource['publisher'] = record.metadata.get_element(
'.//{0}publisher'.format(dc))
# sourceResource.relation
# sourceResource.isReplacedBy
# sourceResource.replaces
# sourceResource.rights
rights = record.metadata.get_element('.//{0}rights'.format(dc))
if rights:
sourceResource['rights'] = [{'text': rights[0]}]
else:
# logging.warning('No sourceResource.rights - {0}'.format(oai_id))
continue
# sourceResource.subject
if record.metadata.get_element('.//{0}subject'.format(dc)):
sourceResource['subject'] = []
for term in record.metadata.get_element('.//{0}subject'.format(dc),
delimiter=';'):
term = re.sub("\( lcsh \)$", '', term)
if len(term) > 0:
sourceResource['subject'].append({"name": term.strip(" ")})
# sourceResource.title
title = record.metadata.get_element('.//{0}title'.format(dc))
if title:
sourceResource['title'] = title
else:
# logging.warning('No sourceResource.rights - {0}'.format(oai_id))
print('Rights', oai_id)
continue
# sourceResource.type
if record.metadata.get_element('.//{0}type'.format(dc)):
sourceResource['type'] = record.metadata.get_element(
'.//{0}type'.format(dc), delimiter=';')
# webResource.fileFormat
# aggregation.dataProvider
data_provider = dprovide
# aggregation.intermediateProvider
# aggregation.isShownAt
# aggregation.preview
try:
preview = assets.thumbnail_service(PURL_match, tn)
except UnboundLocalError as err:
# logging.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
print(err, oai_id)
continue
# aggregation.provider
try:
docs.append({"@context": "http://api.dp.la/items/context",
"sourceResource": sourceResource,
"aggregatedCHO": "#sourceResource",
"dataProvider": data_provider,
"isShownAt": PURL_match,
"preview": preview,
"provider": PROVIDER})
except NameError as err:
# logging.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
print(err, oai_id)
pass
#write_json_ld(docs) # write test
print(json.dumps(docs, indent=2)) # dump test
| mit | -2,992,809,091,714,091,000 | 40.508475 | 87 | 0.459677 | false | 4.732367 | false | false | false |
abhisheksugam/Climate_Police | Climate_Police/tests/temp_map.py | 1 | 1335 | import plotly.offline as py
py.init_notebook_mode()
from temp_pre_process import temp_pre_process
def temp_map(temp, year):
# Pre-processes the temperature data so that it can be plotted by plotly.
df2 = temp_pre_process(temp, year)
#scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
#[0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
data = [ dict(
type='choropleth',
#colorscale = scl,
autocolorscale = True,
locations = df2.index,
z = df2['AverageTemperature'].astype(float),
locationmode = 'USA-states',
text = df2['text'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
) ),
colorbar = dict(
title = '°C')
) ]
layout = dict(
title = year+' US Average Temperature by State<br>(Hover for details)',
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)'),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, filename='us-temperature-map' )
plotSuccessful = "Temperature map plotted."
return fig, plotSuccessful
| mit | -5,169,399,646,883,258,000 | 30.023256 | 89 | 0.537481 | false | 3.073733 | false | false | false |
eduardoneira/SistemasDistribuidos_TPFinal | CentroMonitoreoCiudad/WebListener/modules/face_recognizer_client.py | 1 | 2185 | #!/bin/python3
import pika
import json
import logging
class FaceRecognizerClient(object):
def __init__(self,host,queue_send,queue_receive):
self.queue_send = queue_send
self.queue_receive = queue_receive
self.host = host
self.__connect()
def __connect(self):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host))
self.channel = self.connection.channel()
self.channel.queue_declare( queue=self.queue_receive)
self.channel.basic_consume(self.on_response,
no_ack=True,
queue=self.queue_receive)
def on_response(self, ch, method, props, body):
self.response = body
def publish(self, message):
self.response = None
try:
self.__publish(message)
except pika.exceptions.ConnectionClosed:
logging.warning('Se perdio la conexion, volviendo a conectarse')
self.__connect()
self.__publish(message)
logging.debug('Se envio mensaje al face recognizer. Esperando su respuesta')
while self.response is None:
self.connection.process_data_events()
logging.debug('El face recognizer respondio %s',self.response)
return self.response
def __publish(self,message):
self.channel.basic_publish(exchange='',
routing_key=self.queue_send,
properties=pika.BasicProperties(
reply_to = self.queue_receive
),
body=message)
def update(self,images):
message = {
'type': 'update',
'images': images
}
response = json.loads(self.publish(json.dumps(message)).decode('utf-8'))
return response['id']
def predict(self,faces):
message = {
'type': 'predict',
'faces': faces
}
response = json.loads(self.publish(json.dumps(message)).decode('utf-8'))
return response['ids']
def close(self):
try:
self.connection.close()
except pika.exceptions.ConnectionClosed:
logging.warning('La conexion ya estaba cerrada')
| gpl-3.0 | -7,080,464,901,833,375,000 | 27.75 | 88 | 0.596796 | false | 4.076493 | false | false | false |
patrickm/chromium.src | chrome/common/extensions/docs/server2/path_canonicalizer.py | 1 | 4304 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import posixpath
from future import Future
from path_util import SplitParent
from special_paths import SITE_VERIFICATION_FILE
def _SimplifyFileName(file_name):
return (posixpath.splitext(file_name)[0]
.lower()
.replace('.', '')
.replace('-', '')
.replace('_', ''))
class PathCanonicalizer(object):
'''Transforms paths into their canonical forms. Since the docserver has had
many incarnations - e.g. there didn't use to be apps/ - there may be old
paths lying around the webs. We try to redirect those to where they are now.
'''
def __init__(self,
file_system,
object_store_creator,
strip_extensions):
# |strip_extensions| is a list of file extensions (e.g. .html) that should
# be stripped for a path's canonical form.
self._cache = object_store_creator.Create(
PathCanonicalizer, category=file_system.GetIdentity())
self._file_system = file_system
self._strip_extensions = strip_extensions
def _LoadCache(self):
cached_future = self._cache.GetMulti(('canonical_paths',
'simplified_paths_map'))
def resolve():
# |canonical_paths| is the pre-calculated set of canonical paths.
# |simplified_paths_map| is a lazily populated mapping of simplified file
# names to a list of full paths that contain them. For example,
# - browseraction: [extensions/browserAction.html]
# - storage: [apps/storage.html, extensions/storage.html]
cached = cached_future.Get()
canonical_paths, simplified_paths_map = (
cached.get('canonical_paths'), cached.get('simplified_paths_map'))
if canonical_paths is None:
assert simplified_paths_map is None
canonical_paths = set()
simplified_paths_map = defaultdict(list)
for base, dirs, files in self._file_system.Walk(''):
for path in dirs + files:
path_without_ext, ext = posixpath.splitext(path)
canonical_path = posixpath.join(base, path_without_ext)
if (ext not in self._strip_extensions or
path == SITE_VERIFICATION_FILE):
canonical_path += ext
canonical_paths.add(canonical_path)
simplified_paths_map[_SimplifyFileName(path)].append(canonical_path)
# Store |simplified_paths_map| sorted. Ties in length are broken by
# taking the shortest, lexicographically smallest path.
for path_list in simplified_paths_map.itervalues():
path_list.sort(key=lambda p: (len(p), p))
self._cache.SetMulti({
'canonical_paths': canonical_paths,
'simplified_paths_map': simplified_paths_map,
})
else:
assert simplified_paths_map is not None
return canonical_paths, simplified_paths_map
return Future(callback=resolve)
def Canonicalize(self, path):
'''Returns the canonical path for |path|.
'''
canonical_paths, simplified_paths_map = self._LoadCache().Get()
# Path may already be the canonical path.
if path in canonical_paths:
return path
# Path not found. Our single heuristic: find |base| in the directory
# structure with the longest common prefix of |path|.
_, base = SplitParent(path)
potential_paths = simplified_paths_map.get(_SimplifyFileName(base))
if not potential_paths:
# There is no file with anything close to that name.
return path
# The most likely canonical file is the one with the longest common prefix
# with |path|. This is slightly weaker than it could be; |path| is
# compared, not the simplified form of |path|, which may matter.
max_prefix = potential_paths[0]
max_prefix_length = len(posixpath.commonprefix((max_prefix, path)))
for path_for_file in potential_paths[1:]:
prefix_length = len(posixpath.commonprefix((path_for_file, path)))
if prefix_length > max_prefix_length:
max_prefix, max_prefix_length = path_for_file, prefix_length
return max_prefix
def Cron(self):
return self._LoadCache()
| bsd-3-clause | -6,192,237,749,339,063,000 | 38.486239 | 80 | 0.659619 | false | 4.075758 | false | false | false |
rmcgibbo/psi4public | psi4/driver/p4util/numpy_helper.py | 2 | 17628 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import numpy as np
from psi4 import core
from .exceptions import *
### Matrix and Vector properties
# The next three functions make me angry
def translate_interface(interface):
"""
    Convert the unicode keys (and 'typestr' value) of an __array_interface__
    dict to byte strings on Python 2, where NumPy expects plain str.
"""
if sys.version_info[0] > 2:
return interface
nouni_interface = {}
for k, v in interface.items():
if k == 'typestr':
nouni_interface[k.encode('ascii', 'ignore')] = v.encode('ascii', 'ignore')
else:
nouni_interface[k.encode('ascii', 'ignore')] = v
return nouni_interface
class numpy_holder(object):
"""
    Minimal object exposing __array_interface__; NumPy cannot create a view
    directly from a bare dictionary, so the dict is wrapped in this holder.
"""
def __init__(self, interface):
self.__array_interface__ = translate_interface(interface)
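# Wrapping the interface dict in numpy_holder lets np.array() build a zero-copy
# view over the Psi4-owned buffer via NumPy's array interface protocol.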
def _get_raw_views(self, copy=False):
"""
    Gets simple raw NumPy views of the passed-in object, one per irrep.
"""
ret = []
for data in self.array_interface():
# Yet another hack
if isinstance(data["shape"], list):
data["shape"] = tuple(data["shape"])
if 0 in data["shape"]:
ret.append(np.empty(shape=data["shape"]))
else:
ret.append(np.array(numpy_holder(data), copy=copy))
return ret
def _find_dim(arr, ndim):
"""
    Helper function for determining dimensions that also handles None, False,
    and zero-sized array inputs.
"""
# Zero arrays
if (arr is None) or (arr is False):
return [0] * ndim
# Make sure this is a numpy array like thing
try:
arr.shape
    except AttributeError:
        raise ValidationError("Expected numpy array, found object of type '%s'" % type(arr))
if len(arr.shape) == ndim:
return [arr.shape[x] for x in range(ndim)]
else:
raise ValidationError("Input array does not have a valid shape.")
def array_to_matrix(self, arr, name="New Matrix", dim1=None, dim2=None):
"""
Converts a numpy array or list of numpy arrays into a Psi4 Matrix (irreped if list).
Parameters
----------
arr : array or list of arrays
Numpy array or list of arrays to use as the data for a new core.Matrix
name : str
Name to give the new core.Matrix
dim1 : list, tuple, or core.Dimension (optional)
If a single dense numpy array is given, a dimension can be supplied to
apply irreps to this array. Note that this discards all extra information
given in the matrix besides the diagonal blocks determined by the passed
dimension.
    dim2 : list, tuple, or core.Dimension (optional)
        Same as dim1, but for the second (column) dimension; required together
        with dim1 when building a core.Matrix and not allowed for a core.Vector.
Returns
-------
matrix : :py:class:`~psi4.core.Matrix` or :py:class:`~psi4.core.Vector`
Returns the given Psi4 object
Notes
-----
This is a generalized function to convert a NumPy array to a Psi4 object
Examples
--------
>>> data = np.random.rand(20)
>>> vector = array_to_matrix(data)
>>> irrep_data = [np.random.rand(2, 2), np.empty(shape=(0,3)), np.random.rand(4, 4)]
>>> matrix = array_to_matrix(irrep_data)
    >>> print(matrix.rowspi().to_tuple())
(2, 0, 4)
"""
# What type is it? MRO can help.
arr_type = self.__mro__[0]
# Irreped case
if isinstance(arr, (list, tuple)):
if (dim1 is not None) or (dim2 is not None):
raise ValidationError("Array_to_Matrix: If passed input is list of arrays dimension cannot be specified.")
irreps = len(arr)
if arr_type == core.Matrix:
sdim1 = core.Dimension(irreps)
sdim2 = core.Dimension(irreps)
for i in range(irreps):
d1, d2 = _find_dim(arr[i], 2)
sdim1[i] = d1
sdim2[i] = d2
ret = self(name, sdim1, sdim2)
elif arr_type == core.Vector:
sdim1 = core.Dimension(irreps)
for i in range(irreps):
d1 = _find_dim(arr[i], 1)
sdim1[i] = d1[0]
ret = self(name, sdim1)
else:
raise ValidationError("Array_to_Matrix: type '%s' is not recognized." % str(arr_type))
for view, vals in zip(ret.nph, arr):
if 0 in view.shape: continue
view[:] = vals
return ret
# No irreps implied by list
else:
if arr_type == core.Matrix:
# Build an irreped array back out
if dim1 is not None:
if dim2 is None:
raise ValidationError ("Array_to_Matrix: If dim1 is supplied must supply dim2 also")
dim1 = core.Dimension.from_list(dim1)
dim2 = core.Dimension.from_list(dim2)
if dim1.n() != dim2.n():
raise ValidationError("Array_to_Matrix: Length of passed dim1 must equal length of dim2.")
ret = self(name, dim1, dim2)
start1 = 0
start2 = 0
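                # Copy only the diagonal blocks of the dense input into each
                # irrep block; off-diagonal data is discarded (see docstring).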
for num, interface in enumerate(ret.nph):
d1 = dim1[num]
d2 = dim2[num]
if (d1 == 0) or (d2 == 0):
continue
view = np.asarray(interface)
view[:] = arr[start1:start1 + d1, start2:start2 + d2]
start1 += d1
start2 += d2
return ret
# Simple case without irreps
else:
ret = self(name, arr.shape[0], arr.shape[1])
view = _get_raw_views(ret)[0]
view[:] = arr
return ret
elif arr_type == core.Vector:
# Build an irreped array back out
if dim1 is not None:
if dim2 is not None:
raise ValidationError ("Array_to_Matrix: If dim2 should not be supplied for 1D vectors.")
dim1 = core.Dimension.from_list(dim1)
ret = self(name, dim1)
start1 = 0
for num, interface in enumerate(ret.nph):
d1 = dim1[num]
if (d1 == 0):
continue
view = np.asarray(interface)
view[:] = arr[start1:start1 + d1]
start1 += d1
return ret
# Simple case without irreps
else:
ret = self(name, arr.shape[0])
ret.np[:] = arr
return ret
else:
raise ValidationError("Array_to_Matrix: type '%s' is not recognized." % str(arr_type))
def _to_array(matrix, copy=True, dense=False):
"""
    Converts a Psi4 Matrix or Vector to a NumPy array. Either copies the data or simply
    constructs a view.
Parameters
----------
matrix : :py:class:`~psi4.core.Matrix` or :py:class:`~psi4.core.Vector`
Pointers to which Psi4 core class should be used in the construction.
copy : bool
Copy the data if True, return a view otherwise
dense : bool
Converts irreped Psi4 objects to diagonally blocked dense arrays. Returns a list of arrays otherwise.
Returns
-------
    array : np.ndarray or list of np.ndarray
Returns either a list of np.array's or the base array depending on options.
Notes
-----
This is a generalized function to convert a Psi4 object to a NumPy array
Examples
--------
>>> data = psi4.Matrix(3, 3)
>>> data.to_array()
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
"""
if matrix.nirrep() > 1:
# We will copy when we make a large matrix
if dense:
copy = False
ret = _get_raw_views(matrix, copy=copy)
# Return the list of arrays
if dense is False:
return ret
# Build the dense matrix
if isinstance(matrix, core.Vector):
ret_type = '1D'
elif isinstance(matrix, core.Matrix):
ret_type = '2D'
else:
            raise ValidationError("_to_array: type '%s' is not recognized." % type(matrix))
dim1 = []
dim2 = []
for h in ret:
# Ignore zero dim irreps
if 0 in h.shape:
dim1.append(0)
dim2.append(0)
else:
dim1.append(h.shape[0])
if ret_type == '2D':
dim2.append(h.shape[1])
ndim1 = np.sum(dim1)
ndim2 = np.sum(dim2)
if ret_type == '1D':
dense_ret = np.zeros(shape=(ndim1))
start = 0
for d1, arr in zip(dim1, ret):
if d1 == 0: continue
dense_ret[start: start + d1] = arr
start += d1
else:
dense_ret = np.zeros(shape=(ndim1, ndim2))
start1 = 0
start2 = 0
for d1, d2, arr in zip(dim1, dim2, ret):
if d1 == 0: continue
dense_ret[start1: start1 + d1, start2: start2 + d2] = arr
start1 += d1
start2 += d2
return dense_ret
else:
return _get_raw_views(matrix, copy=copy)[0]
def _build_view(matrix):
"""
Builds a view of the vector or matrix
"""
views = _to_array(matrix, copy=False, dense=False)
if matrix.nirrep() > 1:
return tuple(views)
else:
return views
def get_view(self):
if hasattr(self, '_np_view_data'):
return self._np_view_data
else:
self._np_view_data = _build_view(self)
return self._np_view_data
@property
def _np_shape(self):
"""
Shape of the Psi4 data object
"""
view_data = get_view(self)
if self.nirrep() > 1:
return tuple(view_data[x].shape for x in range(self.nirrep()))
else:
return view_data.shape
@property
def _np_view(self):
"""
    View for objects with only one irrep.
"""
if self.nirrep() > 1:
raise ValidationError("Attempted to call .np on a Psi4 data object with multiple irreps. Please use .nph for objects with irreps.")
return get_view(self)
@property
def _nph_view(self):
"""
View with irreps.
"""
if self.nirrep() > 1:
return get_view(self)
else:
return get_view(self),
@property
def _array_conversion(self):
if self.nirrep() > 1:
        raise ValidationError("__array_interface__ can only be called on a Psi4 data object with only one irrep!")
else:
return self.np.__array_interface__
def _np_write(self, filename=None, prefix=""):
ret = {}
ret[prefix + "Irreps"] = self.nirrep()
ret[prefix + "Name"] = self.name
for h, v in enumerate(self.nph):
ret[prefix + "IrrepData" + str(h)] = v
if isinstance(self, core.Matrix):
ret[prefix + "Dim1"] = self.rowdim().to_tuple()
ret[prefix + "Dim2"] = self.coldim().to_tuple()
if isinstance(self, core.Vector):
ret[prefix + "Dim"] = [self.dim(x) for x in range(self.nirrep())]
if filename is None:
return ret
np.savez(filename, **ret)
def _np_read(self, filename, prefix=""):
if isinstance(filename, np.lib.npyio.NpzFile):
data = filename
elif (sys.version_info[0] == 2) and isinstance(filename, (str, unicode)):
if not filename.endswith('.npz'):
filename = filename + '.npz'
data = np.load(filename)
elif (sys.version_info[0] > 2) and isinstance(filename, str):
if not filename.endswith('.npz'):
filename = filename + '.npz'
data = np.load(filename)
else:
raise Exception("Filename not understood: %s" % filename)
ret_data = []
if ((prefix + "Irreps") not in data.keys()) or ((prefix + "Name") not in data.keys()):
        raise ValidationError("File %s does not appear to be a NumPy .npz save" % filename)
for h in range(data[prefix + "Irreps"]):
ret_data.append(data[prefix + "IrrepData" + str(h)])
arr_type = self.__mro__[0]
if arr_type == core.Matrix:
dim1 = core.Dimension.from_list(data[prefix + "Dim1"])
dim2 = core.Dimension.from_list(data[prefix + "Dim2"])
ret = self(str(data[prefix + "Name"]), dim1, dim2)
elif arr_type == core.Vector:
dim1 = core.Dimension.from_list(data[prefix + "Dim"])
ret = self(str(data[prefix + "Name"]), dim1)
for h in range(data[prefix + "Irreps"]):
ret.nph[h][:] = ret_data[h]
return ret
def _to_serial(data):
"""
Converts an object with a .nph accessor to a serialized dictionary
"""
json_data = {}
json_data["shape"] = []
json_data["data"] = []
for view in data.nph:
json_data["shape"].append(view.shape)
json_data["data"].append(view.tostring())
if len(json_data["shape"][0]) == 1:
json_data["type"] = "vector"
elif len(json_data["shape"][0]) == 2:
json_data["type"] = "matrix"
else:
        raise ValidationError("_to_serial is only used for vector and matrix objects.")
return json_data
def _from_serial(self, json_data):
"""
Converts serialized data to the correct Psi4 data type
"""
if json_data["type"] == "vector":
dim1 = core.Dimension.from_list([x[0] for x in json_data["shape"]])
ret = self("Vector from JSON", dim1)
elif json_data["type"] == "matrix":
dim1 = core.Dimension.from_list([x[0] for x in json_data["shape"]])
dim2 = core.Dimension.from_list([x[1] for x in json_data["shape"]])
ret = self("Matrix from JSON", dim1, dim2)
else:
        raise ValidationError("_from_serial did not recognize type option of %s." % str(json_data["type"]))
for n in range(len(ret.nph)):
ret.nph[n].flat[:] = np.fromstring(json_data["data"][n], dtype=np.double)
return ret
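# Illustrative sketch (not part of the original module): a typical serialize /
# deserialize round trip using the helpers above, assuming they have been
# attached to core.Matrix as done below.
#
#   >>> mat = core.Matrix("M", 3, 3)
#   >>> blob = mat.to_serial()
#   >>> mat2 = core.Matrix.from_serial(blob)
#   >>> np.allclose(mat.to_array(), mat2.to_array())
#   True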
# Matrix attributes
def _chain_dot(*args, **kwargs):
"""
Chains dot products together from a series of Psi4 Matrix classes.
    By default no transposes are applied; an optional list of booleans can be passed in via the 'trans' keyword.
"""
trans = kwargs.pop("trans", None)
if trans is None:
trans = [False for x in range(len(args))]
else:
if len(trans) != len(args):
            raise ValidationError("Chain dot: The length of the transpose arguments is not equal to the length of args.")
# Setup chain
ret = args[0]
if trans[0]:
ret = ret.transpose()
# Run through
for n, mat in enumerate(args[1:]):
ret = core.Matrix.doublet(ret, mat, False, trans[n + 1])
return ret
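# Illustrative sketch (not part of the original module): chain_dot multiplies a
# sequence of Matrices left to right; the optional 'trans' list transposes the
# corresponding operands.
#
#   >>> A = core.Matrix.from_array(np.random.rand(4, 4))
#   >>> B = core.Matrix.from_array(np.random.rand(4, 4))
#   >>> C = core.Matrix.from_array(np.random.rand(4, 4))
#   >>> D = core.Matrix.chain_dot(A, B, C, trans=[True, False, False])  # computes A^T B C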
# Matrix attributes
core.Matrix.from_array = classmethod(array_to_matrix)
core.Matrix.to_array = _to_array
core.Matrix.shape = _np_shape
core.Matrix.np = _np_view
core.Matrix.nph = _nph_view
core.Matrix.__array_interface__ = _array_conversion
core.Matrix.np_write = _np_write
core.Matrix.np_read = classmethod(_np_read)
core.Matrix.to_serial = _to_serial
core.Matrix.from_serial = classmethod(_from_serial)
core.Matrix.chain_dot = _chain_dot
# Vector attributes
core.Vector.from_array = classmethod(array_to_matrix)
core.Vector.to_array = _to_array
core.Vector.shape = _np_shape
core.Vector.np = _np_view
core.Vector.nph = _nph_view
core.Vector.__array_interface__ = _array_conversion
core.Vector.np_write = _np_write
core.Vector.np_read = classmethod(_np_read)
core.Vector.to_serial = _to_serial
core.Vector.from_serial = classmethod(_from_serial)
### CIVector properties
@property
def _civec_view(self):
"Returns a view of the CIVector's buffer"
return np.asarray(self)
core.CIVector.np = _civec_view
### Dimension properties
@classmethod
def _dimension_from_list(self, dims, name="New Dimension"):
"""
Builds a core.Dimension object from a python list or tuple. If a dimension
object is passed a copy will be returned.
"""
if isinstance(dims, (tuple, list, np.ndarray)):
irreps = len(dims)
elif isinstance(dims, core.Dimension):
irreps = dims.n()
else:
raise ValidationError("Dimension from list: Type '%s' not understood" % type(dims))
ret = core.Dimension(irreps, name)
for i in range(irreps):
ret[i] = dims[i]
return ret
def _dimension_to_tuple(dim):
"""
Converts a core.Dimension object to a tuple.
"""
if isinstance(dim, (tuple, list)):
return tuple(dim)
irreps = dim.n()
ret = []
for i in range(irreps):
ret.append(dim[i])
return tuple(ret)
def _dimension_iter(dim):
"""
Provides an iterator class for the Dimension object.
Allows:
dim = psi4.core.Dimension(...)
list(dim)
"""
for i in range(dim.n()):
yield dim[i]
# Dimension attributes
core.Dimension.from_list = _dimension_from_list
core.Dimension.to_tuple = _dimension_to_tuple
core.Dimension.__iter__ = _dimension_iter
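# Illustrative sketch (not part of the original module): the Dimension helpers
# attached above allow list-like construction, conversion and iteration.
#
#   >>> dim = core.Dimension.from_list([2, 0, 4], name="occ")
#   >>> dim.to_tuple()
#   (2, 0, 4)
#   >>> list(dim)
#   [2, 0, 4]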
| lgpl-3.0 | -7,813,364,256,415,750,000 | 28.331115 | 139 | 0.579306 | false | 3.634639 | false | false | false |
zhuoju36/StructEngPy | object_model/frame.py | 1 | 11514 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 10:09:44 2018
@author: Dell
"""
import uuid
from sqlalchemy.sql import and_
from .orm import Config,Point,Frame,FrameSection,FrameLoadDistributed,FrameLoadConcentrated,FrameLoadTemperature,FrameLoadStrain
import logger
def add_frame(self,pt0_coor,pt1_coor,section,name=None):
"""
Add frame object to model, if the name already exists, an exception will be raised.
param:
pt0_coor: tuple, coordinate of the end point 0 in current unit.
pt1_coor: tuple, coordinate of the end point 1 in current unit.
[name]: str, name, optional.
return:
str, the new frame's name.
"""
assert(len(pt0_coor)==3 and len(pt1_coor)==3)
if name and self.session.query(Frame).filter_by(name=name).first()!=None:
        raise Exception('Name already exists!')
if self.session.query(FrameSection).filter_by(name=section).first() is None:
        raise Exception("Frame section doesn't exist!")
frm=Frame()
scale=self.scale()
tol=self.session.query(Config).first().tolerance
pt0=self.session.query(Point).filter(and_(
(Point.x-pt0_coor[0]*scale['L'])<tol,(pt0_coor[0]*scale['L']-Point.x)<tol,
(Point.y-pt0_coor[1]*scale['L'])<tol,(pt0_coor[1]*scale['L']-Point.y)<tol,
(Point.z-pt0_coor[2]*scale['L'])<tol,(pt0_coor[2]*scale['L']-Point.z)<tol)).first()
if pt0==None:
pt0_name=self.add_point(pt0_coor[0]*scale['L'],pt0_coor[1]*scale['L'],pt0_coor[2]*scale['L'])
else:
pt0_name=pt0.name
    pt1=self.session.query(Point).filter(and_(
        (Point.x-pt1_coor[0]*scale['L'])<tol,(pt1_coor[0]*scale['L']-Point.x)<tol,
        (Point.y-pt1_coor[1]*scale['L'])<tol,(pt1_coor[1]*scale['L']-Point.y)<tol,
        (Point.z-pt1_coor[2]*scale['L'])<tol,(pt1_coor[2]*scale['L']-Point.z)<tol)).first()
    if pt1==None:
        pt1_name=self.add_point(pt1_coor[0]*scale['L'],pt1_coor[1]*scale['L'],pt1_coor[2]*scale['L'])
else:
pt1_name=pt1.name
if pt0_name<pt1_name:
order='01'
frm.pt0_name=pt0_name
frm.pt1_name=pt1_name
frm.order=order
elif pt0_name>pt1_name:
order='10'
frm.pt0_name=pt1_name
frm.pt1_name=pt0_name
frm.order=order
else:
raise Exception('Two points should not be the same!')
frm.section_name=section
frm.uuid=str(uuid.uuid1())
if name:
frm.name=name
else:
frm.name=frm.uuid
self.session.add(frm)
return frm.name
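# Illustrative sketch (not part of the original module): typical use of
# add_frame on a model object ('model' and the section name 'H400x200' are
# assumptions; the section must already exist in the model).
#
#   >>> model.add_frame((0, 0, 0), (0, 0, 3), section='H400x200', name='column-1')
#   'column-1'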
def add_frame_batch(self,pt_coors,section):
"""
    Add a batch of frame objects to the model.
param:
pt_coors: list of float tuples as ((pt0.x,pt0.y,pt0.z),(pt1.x,pt1.y,pt1.z))
return:
status of success, and list of str, the new frame's names if successful.
"""
try:
assert(len(pt_coors[0][0])==len(pt_coors[0][1]))
if self.session.query(FrameSection).filter_by(name=section).first() is None:
            raise Exception("Frame section doesn't exist!")
names=[]
frm_ends=[]
scale=self.scale()
for pt0,pt1 in pt_coors:
pt0_name=self.add_point(pt0[0]*scale['L'],pt0[1]*scale['L'],pt0[2]*scale['L'])
pt1_name=self.add_point(pt1[0]*scale['L'],pt1[1]*scale['L'],pt1[2]*scale['L'])
frm_ends.append((pt0_name,pt1_name))
tol=self.session.query(Config).first().tolerance
pts=self.session.query(Point).order_by(Point.x,Point.y,Point.z).all()
pt_map=dict([(pt.name,pt.name) for pt in pts])
pts_to_rmv=[]
for pti,ptj in zip(pts[:-1],pts[1:]):
if (ptj.x-pti.x)**2+(ptj.y-pti.y)**2+(ptj.z-pti.z)**2<tol**2:
pt_map[ptj.name]=pt_map[pti.name]
pts_to_rmv.append(ptj)
for (pt0_name,pt1_name) in frm_ends:
frm=Frame()
if pt_map[pt0_name]<pt_map[pt1_name]:
frm.pt0_name=pt_map[pt0_name]
frm.pt1_name=pt_map[pt1_name]
frm.order='01'
elif pt_map[pt0_name]>pt_map[pt1_name]:
frm.pt0_name=pt_map[pt1_name]
frm.pt1_name=pt_map[pt0_name]
frm.order='10'
else:
continue
frm.section_name=section
frm.uuid=str(uuid.uuid1())
frm.name=frm.uuid
names.append(frm.name)
self.session.add(frm)
for pt in pts_to_rmv:
self.session.delete(pt)
self.session.commit()
return True,names
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_section(self,frame,section):
"""
Assign a frame section to a frame.
params:
frame: str, name of frame.
section: str, name of section.
"""
try:
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
frm.section_name=section
self.session.add(frm)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_mesh(self,frame):
pass
def set_frame_load_distributed(self,frame,loadcase,load):
"""
params:
            frame: str, name of frame.
            loadcase: str, name of loadcase.
            load: list of 12 floats, distributed load components (p1,p2,p3,m1,m2,m3) at end 0 followed by end 1.
return:
status of success.
"""
try:
assert len(load)==12
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
ld=self.session.query(FrameLoadDistributed).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadDistributed()
scale=self.scale()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.p01=load[0]*scale['F']
ld.p02=load[1]*scale['F']
ld.p03=load[2]*scale['F']
ld.m01=load[3]*scale['F']*scale['L']
ld.m02=load[4]*scale['F']*scale['L']
ld.m03=load[5]*scale['F']*scale['L']
ld.p11=load[6]*scale['F']
ld.p12=load[7]*scale['F']
ld.p13=load[8]*scale['F']
ld.m11=load[9]*scale['F']*scale['L']
ld.m12=load[10]*scale['F']*scale['L']
ld.m13=load[11]*scale['F']*scale['L']
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
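# Illustrative sketch (not part of the original module): a uniform line load of
# magnitude 10 on the local 3-axis at both ends would be passed as twelve
# components (p1,p2,p3,m1,m2,m3 at end 0, then end 1). The names 'model',
# 'column-1' and the load case 'DL' are assumptions, and the sign convention
# follows the library's local coordinate system.
#
#   >>> model.set_frame_load_distributed('column-1', 'DL',
#   ...     [0, 0, -10, 0, 0, 0,  0, 0, -10, 0, 0, 0])
#   True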
def set_frame_load_concentrated(self,frame,loadcase,load,loc):
"""
params:
            frame: str, name of frame.
            loadcase: str, name of loadcase.
            load: list of 6 floats, concentrated load components (p1,p2,p3,m1,m2,m3).
            loc: float in [0,1], relative position of the load along the frame.
return:
status of success.
"""
try:
assert (len(load)==6 and (loc<=1 and loc>=0))
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
ld=self.session.query(FrameLoadConcentrated).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadConcentrated()
scale=self.scale()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.p1=load[0]*scale['F']
ld.p2=load[1]*scale['F']
ld.p3=load[2]*scale['F']
ld.m1=load[3]*scale['F']*scale['L']
ld.m2=load[4]*scale['F']*scale['L']
ld.m3=load[5]*scale['F']*scale['L']
ld.loc=loc
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_load_strain(self,frame,loadcase,strain):
"""
params:
            frame: str, name of frame.
loadcase: str, name of loadcase.
strain: float, strain in 1-1 axis.
return:
status of success.
"""
try:
assert (strain<=1 and strain>=0)
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
ld=self.session.query(FrameLoadStrain).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadStrain()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.strain=strain
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_load_temperature(self,frame,loadcase,temperature):
"""
params:
frame: str, name of frame.
loadcase: str, name of loadcase.
temperature: float, temperature in 1-1 axis.
return:
status of success.
"""
try:
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
ld=self.session.query(FrameLoadTemperature).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadTemperature()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.T=temperature
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def get_frame_names_by_points(self,pt1,pt2):
"""
params:
            pt1, pt2: str, names of the two end points.
        returns:
            list of names of the frames connecting the given points.
"""
pass
def get_frame_names(self):
"""
        Get the names of all frames in the database.
returns:
frame name list if successful or None if failed.
"""
try:
frms=self.session.query(Frame).all()
return [frm.name for frm in frms]
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_frame_end_names(self,frame):
"""
params:
frame: str, name of frame.
return:
            the two point names at the frame's start and end if successful, or None if failed
"""
try:
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
return frm.pt0.name,frm.pt1.name
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_frame_end_coors(self,frame):
"""
params:
frame: str, name of frame.
return:
6-list of floats end_coors in current unit if successful or None if failed
"""
try:
scale=self.scale()
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
            raise Exception("Frame doesn't exist.")
pt0=frm.pt0
pt1=frm.pt1
return [pt0.x/scale['L'],pt0.y/scale['L'],pt0.z/scale['L'],pt1.x/scale['L'],pt1.y/scale['L'],pt1.z/scale['L']]
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_frame_section_attribute(self,name):
"""
params:
name: str
returns:
frame section object if exist
"""
pass
def delete_frame(self,name):
try:
        frm=self.session.query(Frame).filter_by(name=name).first()
        if frm is None:
            raise Exception("Frame doesn't exist!")
self.session.delete(frm)
except Exception as e:
        logger.info(str(e))
self.session.rollback()
return False | mit | -6,769,975,149,060,318,000 | 31.43662 | 128 | 0.574344 | false | 3.255301 | false | false | false |
adrienbrault/home-assistant | homeassistant/components/vicare/sensor.py | 3 | 14061 | """Viessmann ViCare sensor device."""
import logging
import requests
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ICON,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
PERCENTAGE,
TEMP_CELSIUS,
TIME_HOURS,
)
from . import (
DOMAIN as VICARE_DOMAIN,
PYVICARE_ERROR,
VICARE_API,
VICARE_HEATING_TYPE,
VICARE_NAME,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
CONF_GETTER = "getter"
SENSOR_TYPE_TEMPERATURE = "temperature"
SENSOR_OUTSIDE_TEMPERATURE = "outside_temperature"
SENSOR_SUPPLY_TEMPERATURE = "supply_temperature"
SENSOR_RETURN_TEMPERATURE = "return_temperature"
# gas sensors
SENSOR_BOILER_TEMPERATURE = "boiler_temperature"
SENSOR_BURNER_MODULATION = "burner_modulation"
SENSOR_BURNER_STARTS = "burner_starts"
SENSOR_BURNER_HOURS = "burner_hours"
SENSOR_BURNER_POWER = "burner_power"
SENSOR_DHW_GAS_CONSUMPTION_TODAY = "hotwater_gas_consumption_today"
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK = "hotwater_gas_consumption_heating_this_week"
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH = "hotwater_gas_consumption_heating_this_month"
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR = "hotwater_gas_consumption_heating_this_year"
SENSOR_GAS_CONSUMPTION_TODAY = "gas_consumption_heating_today"
SENSOR_GAS_CONSUMPTION_THIS_WEEK = "gas_consumption_heating_this_week"
SENSOR_GAS_CONSUMPTION_THIS_MONTH = "gas_consumption_heating_this_month"
SENSOR_GAS_CONSUMPTION_THIS_YEAR = "gas_consumption_heating_this_year"
# heatpump sensors
SENSOR_COMPRESSOR_STARTS = "compressor_starts"
SENSOR_COMPRESSOR_HOURS = "compressor_hours"
SENSOR_COMPRESSOR_HOURS_LOADCLASS1 = "compressor_hours_loadclass1"
SENSOR_COMPRESSOR_HOURS_LOADCLASS2 = "compressor_hours_loadclass2"
SENSOR_COMPRESSOR_HOURS_LOADCLASS3 = "compressor_hours_loadclass3"
SENSOR_COMPRESSOR_HOURS_LOADCLASS4 = "compressor_hours_loadclass4"
SENSOR_COMPRESSOR_HOURS_LOADCLASS5 = "compressor_hours_loadclass5"
# fuelcell sensors
SENSOR_POWER_PRODUCTION_CURRENT = "power_production_current"
SENSOR_POWER_PRODUCTION_TODAY = "power_production_today"
SENSOR_POWER_PRODUCTION_THIS_WEEK = "power_production_this_week"
SENSOR_POWER_PRODUCTION_THIS_MONTH = "power_production_this_month"
SENSOR_POWER_PRODUCTION_THIS_YEAR = "power_production_this_year"
SENSOR_TYPES = {
SENSOR_OUTSIDE_TEMPERATURE: {
CONF_NAME: "Outside Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getOutsideTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
SENSOR_SUPPLY_TEMPERATURE: {
CONF_NAME: "Supply Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getSupplyTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
# gas sensors
SENSOR_BOILER_TEMPERATURE: {
CONF_NAME: "Boiler Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getBoilerTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
SENSOR_BURNER_MODULATION: {
CONF_NAME: "Burner modulation",
CONF_ICON: "mdi:percent",
CONF_UNIT_OF_MEASUREMENT: PERCENTAGE,
CONF_GETTER: lambda api: api.getBurnerModulation(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_TODAY: {
CONF_NAME: "Hot water gas consumption today",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterToday(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK: {
CONF_NAME: "Hot water gas consumption this week",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH: {
CONF_NAME: "Hot water gas consumption this month",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR: {
CONF_NAME: "Hot water gas consumption this year",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisYear(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_TODAY: {
CONF_NAME: "Heating gas consumption today",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingToday(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_WEEK: {
CONF_NAME: "Heating gas consumption this week",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisWeek(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_MONTH: {
CONF_NAME: "Heating gas consumption this month",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisMonth(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_YEAR: {
CONF_NAME: "Heating gas consumption this year",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisYear(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_STARTS: {
CONF_NAME: "Burner Starts",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getBurnerStarts(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_HOURS: {
CONF_NAME: "Burner Hours",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getBurnerHours(),
CONF_DEVICE_CLASS: None,
},
# heatpump sensors
SENSOR_COMPRESSOR_STARTS: {
CONF_NAME: "Compressor Starts",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getCompressorStarts(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS: {
CONF_NAME: "Compressor Hours",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHours(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS1: {
CONF_NAME: "Compressor Hours Load Class 1",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass1(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS2: {
CONF_NAME: "Compressor Hours Load Class 2",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass2(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS3: {
CONF_NAME: "Compressor Hours Load Class 3",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass3(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS4: {
CONF_NAME: "Compressor Hours Load Class 4",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass4(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS5: {
CONF_NAME: "Compressor Hours Load Class 5",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass5(),
CONF_DEVICE_CLASS: None,
},
SENSOR_RETURN_TEMPERATURE: {
CONF_NAME: "Return Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getReturnTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
# fuelcell sensors
SENSOR_POWER_PRODUCTION_CURRENT: {
CONF_NAME: "Power production current",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionCurrent(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_TODAY: {
CONF_NAME: "Power production today",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionToday(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_THIS_WEEK: {
CONF_NAME: "Power production this week",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionThisWeek(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_THIS_MONTH: {
CONF_NAME: "Power production this month",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionThisMonth(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_THIS_YEAR: {
CONF_NAME: "Power production this year",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionThisYear(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
}
SENSORS_GENERIC = [SENSOR_OUTSIDE_TEMPERATURE, SENSOR_SUPPLY_TEMPERATURE]
SENSORS_BY_HEATINGTYPE = {
HeatingType.gas: [
SENSOR_BOILER_TEMPERATURE,
SENSOR_BURNER_HOURS,
SENSOR_BURNER_MODULATION,
SENSOR_BURNER_STARTS,
SENSOR_DHW_GAS_CONSUMPTION_TODAY,
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
SENSOR_GAS_CONSUMPTION_TODAY,
SENSOR_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_GAS_CONSUMPTION_THIS_YEAR,
],
HeatingType.heatpump: [
SENSOR_COMPRESSOR_STARTS,
SENSOR_COMPRESSOR_HOURS,
SENSOR_COMPRESSOR_HOURS_LOADCLASS1,
SENSOR_COMPRESSOR_HOURS_LOADCLASS2,
SENSOR_COMPRESSOR_HOURS_LOADCLASS3,
SENSOR_COMPRESSOR_HOURS_LOADCLASS4,
SENSOR_COMPRESSOR_HOURS_LOADCLASS5,
SENSOR_RETURN_TEMPERATURE,
],
HeatingType.fuelcell: [
# gas
SENSOR_BOILER_TEMPERATURE,
SENSOR_BURNER_HOURS,
SENSOR_BURNER_MODULATION,
SENSOR_BURNER_STARTS,
SENSOR_DHW_GAS_CONSUMPTION_TODAY,
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
SENSOR_GAS_CONSUMPTION_TODAY,
SENSOR_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_GAS_CONSUMPTION_THIS_YEAR,
# fuel cell
SENSOR_POWER_PRODUCTION_CURRENT,
SENSOR_POWER_PRODUCTION_TODAY,
SENSOR_POWER_PRODUCTION_THIS_WEEK,
SENSOR_POWER_PRODUCTION_THIS_MONTH,
SENSOR_POWER_PRODUCTION_THIS_YEAR,
],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare sensor devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
sensors = SENSORS_GENERIC.copy()
if heating_type != HeatingType.generic:
sensors.extend(SENSORS_BY_HEATINGTYPE[heating_type])
add_entities(
[
ViCareSensor(hass.data[VICARE_DOMAIN][VICARE_NAME], vicare_api, sensor)
for sensor in sensors
]
)
class ViCareSensor(SensorEntity):
"""Representation of a ViCare sensor."""
def __init__(self, name, api, sensor_type):
"""Initialize the sensor."""
self._sensor = SENSOR_TYPES[sensor_type]
self._name = f"{name} {self._sensor[CONF_NAME]}"
self._api = api
self._sensor_type = sensor_type
self._state = None
@property
def available(self):
"""Return True if entity is available."""
return self._state is not None and self._state != PYVICARE_ERROR
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.service.id}-{self._sensor_type}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor[CONF_UNIT_OF_MEASUREMENT]
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._sensor[CONF_DEVICE_CLASS]
def update(self):
"""Update state of sensor."""
try:
self._state = self._sensor[CONF_GETTER](self._api)
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from ViCare server")
except ValueError:
_LOGGER.error("Unable to decode data from ViCare server")
| mit | 1,140,110,783,300,486,400 | 34.961637 | 85 | 0.660053 | false | 3.189159 | false | false | false |
1Server/OneServer | oneserver/scheduler.py | 1 | 9003 | from datetime import datetime, timedelta
import threading
import time
from heapq import heappush,heappop
##
# The different states that the task can be in. States should
# not be set manually. They should only be set by the scheduler.
class TaskState:
##
# Either unscheduled or scheduled and waiting to be run.
PENDING = 0
##
# When the task is running.
RUNNING = 1
##
# The task has finished executing.
FINISHED = 2
##
# A task is a unit of work that is to be performed on behalf
# of another part of the server or for a plugin. A task is
# scheduled to run at some point in the future and can be
# set to be a recurring event that happens at an interval.
#
# There are two methods to create a task. The first is to
# implement the ITask interface. The benefit to doing that is
# that the task can have more specialized functionality and
# can be given more data to use for processing.
#
# The second method is to instantiate the interface and just pass
# some settings as well as a function to call when the task
# is run.
class ITask(object):
##
    # Creates a task with the given time before it's called
    # and performs the requested action when called. The task
    # can also be set to repeat itself at the same delay
    # interval.
#
# @param task the task to perform when called
    # @param minutes the number of minutes to wait (default 1)
# @param hours the number of hours to wait (default 0)
# @param days the number of days to wait (default 0)
# @param recurring if the task is to repeat itself
#
# @return the task object that was created
@staticmethod
def createTask(task, minutes = 1, hours = 0, days = 0, recurring = False):
if task == None or not hasattr(task, '__call__'):
raise TypeError('A function must be given to create a task.')
if not issubclass(minutes.__class__, int) or not issubclass(hours.__class__, int) or not issubclass(days.__class__, int):
raise TypeError('The time given must be in an integer form.')
ret = ITask(minutes, hours, days, recurring)
ret.run = task
return ret
##
    # Creates a task with the given time before it's called.
# The task can also be set to repeat itself at the same
# delay interval.
#
    # @param minutes the number of minutes to wait (default 1)
# @param hours the number of hours to wait (default 0)
# @param days the number of days to wait (default 0)
# @param recurring if the task is to repeat itself
def __init__(self, minutes = 1, hours = 0, days = 0, recurring = False):
self.minutes = minutes
self.hours = hours
self.days = days
self.recurring = recurring
self.state = TaskState.PENDING
self.timestamp = self.calculateTimestamp()
##
# Called when the task is to run. In case the task cares
# about when it is actually being called it is provided
# the time that it was executed at. This is given as a
# datetime object.
#
# @param time the time the task was actually selected
# to run at
def run(self, time):
raise NotImplementedError('Task method was not implemented.')
##
# Calculates the timestamp of when the task should next run.
#
# @return a datetime object for the next run time
def calculateTimestamp(self):
return datetime.now() + timedelta(minutes = self.minutes,
hours = self.hours,
days = self.days)
##
# Less than comparison between tasks. Based on the timestamp to next run.
def __lt__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp < other.timestamp
##
# Less than or equal to comparison between tasks. Based on the timestamp to next run.
def __le__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp <= other.timestamp
##
# Equal to comparison between tasks. Based on the timestamp to next run.
def __eq__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp == other.timestamp
##
# Not equal to comparison between tasks. Based on the timestamp to next run.
def __ne__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp != other.timestamp
##
# Greater than or equal to comparison between tasks. Based on the timestamp to next run.
def __ge__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp >= other.timestamp
##
# Greater than comparison between tasks. Based on the timestamp to next run.
def __gt__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp > other.timestamp
##
# Maximum number of threads to run tasks on
maxThreads = -1
##
# This class provides a method to run task objects given to it at a specific time.
class TaskScheduler():
##
# The main thread handle
mainThread = None
##
# Status bool
running = True
##
# Main init
    # @param maxNumThreads Maximum number of worker threads
def __init__(self, maxNumThreads = 10):
global maxThreads
maxThreads = maxNumThreads
self.taskList = []
self.running = False
##
# Starts the scheduler's main thread.
# No tasks can be run before this method is called
def startScheduler(self):
if self.mainThread is not None:
if self.mainThread.isAlive():
raise RuntimeError("Tried to start an already started Scheduler")
self.mainThread = None
self.mainThread = MainSchedulerThread(self)
self.mainThread.start()
self.running = True
##
# Stops the scheduler's main thread
# No tasks can be run after this method is called
def stopScheduler(self):
if self.mainThread is None:
raise RuntimeError("Trying to stop a None Thread")
if not self.mainThread.isAlive():
raise RuntimeError("Trying to stop a Thread that wasn't started")
self.mainThread.stopThread()
self.running = False
##
# Adds a task to be executed
def addTask(self, task):
if not self.running:
raise RuntimeError("Tried to add a task to a stopped scheduler")
if datetime.now() > task.timestamp:
raise RuntimeError("Tried to schedule a task that should have already been run")
heappush(self.taskList, task)
##
# Returns if the scheduler is still running
def isAlive(self):
return self.running
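# Illustrative sketch (not part of the original module): creating a scheduler,
# starting it and submitting a recurring task built with ITask.createTask.
#
#   scheduler = TaskScheduler(maxNumThreads=4)
#   scheduler.startScheduler()
#   task = ITask.createTask(lambda time: None,  # replace with real work
#                           minutes=5, recurring=True)
#   scheduler.addTask(task)
#   ...
#   scheduler.stopScheduler()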
##
# This is the main thread of the TaskScheduler
class MainSchedulerThread(threading.Thread):
##
# Main init
    # @param taskScheduler A TaskScheduler to pull Tasks from
def __init__(self, taskScheduler):
threading.Thread.__init__(self)
self.tasks = taskScheduler.taskList
self.stop = False
self.daemon = True
global maxThreads
self.pool = []
for a in range(maxThreads):
t = TaskThread()
t.start()
self.pool.append(t)
##
# Main method, starts checking for new tasks to run
def run(self):
while not self.stop:
while True:
                if len(self.tasks) == 0:
break
                if datetime.now() >= self.tasks[0].timestamp:
                    # The earliest task is due
                    # Run it after popping it off the heap
task = heappop(self.tasks)
global maxThreads
for a in range(maxThreads):
result = self.pool[a].runTask(task)
if result:
break #Task was added
else:
pass #Thread already had a task, check next
#Check if it needs to reoccur
if task.recurring:
task.timestamp = task.calculateTimestamp()
heappush(self.tasks, task)
else:
break
#After breaking, all tasks that should be run are now running or queued, sleep for 1 min
time.sleep(60)
#When we are stopping, join worker threads, they are already marked as stopped
for a in range(maxThreads):
self.pool[a].join()
##
    # Stops the execution of the scheduler after the next task check; this call will not block
# Call isAlive() to check for when it is stopped
def stopThread(self):
for a in range(maxThreads):
self.pool[a].stopThread()
self.stop = True
##
# This a task thread
class TaskThread(threading.Thread):
##
# Main init
def __init__(self):
threading.Thread.__init__(self)
self.busy = False
self.task = None
self.stop = False
self.daemon = True
##
# Runs the thread in a loop running any given task
def run(self):
while not self.stop:
if self.busy: # Has task
if self.task is None:
self.busy = False
else:
self.task.state = TaskState.RUNNING
self.task.run(datetime.now())
self.task.state = TaskState.FINISHED
self.task = None
else:
time.sleep(1)
##
# Runs the given task, returns False if we already have a task
# @param task The task to run
def runTask(self,task):
if self.busy:
if self.task is not None:
return False
self.task = task
self.busy = True
return True
##
# Stops the TaskThread, returns a task object if one exists
def stopThread(self):
self.stop = True
return self.task
| mit | -3,657,932,361,293,457,000 | 27.311321 | 123 | 0.704432 | false | 3.492242 | false | false | false |
pombo-lab/gamtools | lib/gamtools/tests/test_segregation.py | 1 | 2934 | from gamtools import segregation
import io
import pytest
fixture_two_samples = io.StringIO(
u"""chrom start stop Sample_A Sample_B
chr1 0 50000 0 0
chr1 50000 100000 0 0
chr1 100000 150000 0 0
chr1 150000 200000 0 0
chr1 200000 250000 0 0
chr1 250000 300000 0 0
chr1 300000 350000 0 0
chr1 350000 400000 0 0
chr1 400000 450000 0 0
""")
data_two_samples = segregation.open_segregation(fixture_two_samples)
#########################################
#
# segregation.index_from_interval tests
#
#########################################
def test_interval_within_bin():
interval = 'chr1', 50100, 50300
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
assert len(found_windows) == 1
print(found_windows)
found_chrom, found_start, found_stop = found_windows[0]
assert found_chrom == 'chr1'
assert found_start == 50000
assert found_stop == 100000
def test_interval_is_bin():
interval = 'chr1', 50000, 100000
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
assert len(found_windows) == 1
print(found_windows)
found_chrom, found_start, found_stop = found_windows[0]
assert found_chrom == 'chr1'
assert found_start == 50000
assert found_stop == 100000
def test_interval_overlaps_two_bins():
interval = 'chr1', 50500, 100500
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
print(found_windows)
assert len(found_windows) == 2
assert found_windows[0] == ('chr1', 50000, 100000)
assert found_windows[-1] == ('chr1', 100000, 150000)
def test_interval_overlaps_many_bins():
interval = 'chr1', 50500, 300500
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
print(found_windows)
assert len(found_windows) == 6
assert found_windows[0] == ('chr1', 50000, 100000)
assert found_windows[-1] == ('chr1', 300000, 350000)
def test_interval_end_before_start():
interval = 'chr1', 300500, 50500
with pytest.raises(ValueError):
segregation.index_from_interval(data_two_samples, interval)
def test_invalid_chromosome():
interval = 'chr3', 50000, 100000
with pytest.raises(segregation.InvalidChromError):
segregation.index_from_interval(data_two_samples, interval)
| apache-2.0 | 8,695,599,764,419,689,000 | 36.139241 | 79 | 0.611111 | false | 3.509569 | true | false | false |
levilucio/SyVOLT | t_core/Mutex/STS.py | 1 | 5702 |
from t_core.composer import Composer
from tc_python.arule import ARule
from tc_python.srule import SRule
from tc_python.frule import FRule
from HGiveRuleLHS import HGiveRuleLHS
from HGiveRuleRHS import HGiveRuleRHS
from HMountRuleLHS import HMountRuleLHS
from HMountRuleRHS import HMountRuleRHS
from HNewRuleLHS import HNewRuleLHS
from HNewRuleRHS import HNewRuleRHS
from HReleaseRuleLHS import HReleaseRuleLHS
from HReleaseRuleRHS import HReleaseRuleRHS
from HRequestRuleLHS import HRequestRuleLHS
from HRequestRuleRHS import HRequestRuleRHS
from HTakeRuleLHS import HTakeRuleLHS
from HTakeRuleRHS import HTakeRuleRHS
from HGiveRulePivotLHS import HGiveRulePivotLHS
from HGiveRulePivotRHS import HGiveRulePivotRHS
from HReleaseRulePivotLHS import HReleaseRulePivotLHS
from HReleaseRulePivotRHS import HReleaseRulePivotRHS
from HTakeRulePivotLHS import HTakeRulePivotLHS
from HTakeRulePivotRHS import HTakeRulePivotRHS
class ShortTransformationSequence(Composer):
def __init__(self, N, debug_folder=''):
super(ShortTransformationSequence, self).__init__()
self.length = 0
self.debug_suffix = 'sts'
self.debug_folder = debug_folder
self.N = N
self.NewRule = SRule(HNewRuleLHS(), HNewRuleRHS(), max_iterations=N - 2, ignore_resolver=True)
self.MountRule = ARule(HMountRuleLHS(), HMountRuleRHS(), ignore_resolver=True)
self.RequestRule = FRule(HRequestRuleLHS(), HRequestRuleRHS(), max_iterations=N, ignore_resolver=True)
self.TakeRule = ARule(HTakeRuleLHS(), HTakeRuleRHS(), ignore_resolver=True)
self.ReleaseRule = ARule(HReleaseRuleLHS(), HReleaseRuleRHS(), ignore_resolver=True)
self.GiveRule = ARule(HGiveRuleLHS(), HGiveRuleRHS(), ignore_resolver=True)
def packet_in(self, packet):
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# New Processes
packet = self.NewRule.packet_in(packet)
packet.clean()
if not self.NewRule.is_success:
if self.NewRule.exception is not None:
self.exception = self.NewRule.exception
return packet
self.length += self.NewRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Mount
packet = self.MountRule.packet_in(packet)
packet.clean()
if not self.MountRule.is_success:
if self.MountRule.exception is not None:
self.exception = self.MountRule.exception
return packet
self.length += self.MountRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Request
packet = self.RequestRule.packet_in(packet)
packet.clean()
if not self.RequestRule.is_success:
if self.RequestRule.exception is not None:
self.exception = self.RequestRule.exception
return packet
self.length += self.RequestRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Pass it around
for _ in range(self.N):
# Take
packet = self.TakeRule.packet_in(packet)
packet.clean()
if not self.TakeRule.is_success:
if self.TakeRule.exception is not None:
self.exception = self.TakeRule.exception
return packet
self.length += self.TakeRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Release
packet = self.ReleaseRule.packet_in(packet)
packet.clean()
if not self.ReleaseRule.is_success:
if self.ReleaseRule.exception is not None:
self.exception = self.ReleaseRule.exception
return packet
self.length += self.ReleaseRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Give
packet = self.GiveRule.packet_in(packet)
packet.clean()
if not self.GiveRule.is_success:
if self.GiveRule.exception is not None:
self.exception = self.GiveRule.exception
return packet
self.length += self.GiveRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
self.is_success = True
return packet
class ShortTransformationSequencePivot(ShortTransformationSequence):
def __init__(self, N, debug_folder=''):
super(ShortTransformationSequencePivot, self).__init__(N, debug_folder)
self.debug_suffix = 'sts_pivot'
self.TakeRule = ARule(HTakeRulePivotLHS(), HTakeRulePivotRHS(), ignore_resolver=True)
self.ReleaseRule = ARule(HReleaseRulePivotLHS(), HReleaseRulePivotRHS(), ignore_resolver=True)
self.GiveRule = ARule(HGiveRulePivotLHS(), HGiveRulePivotRHS(), ignore_resolver=True)
| mit | 3,004,954,829,868,638,700 | 46.322034 | 150 | 0.647141 | false | 3.648113 | false | false | false |
dolphinziyo/LocalizarPatron | localizarPatron.py | 1 | 6066 | # coding=ISO-8859-1
'''
Created on 10/06/2011
@author: dolphinziyo
http://twitter.com/dolphinziyo
Web: http://tecnogame.org
'''
# This program searches for given patterns in plain-text files.
# Its usage is very simple: it requires the pattern to search for,
# the directory in which the search will be performed and the extension of the
# files that will be searched. The directory and the extension are optional
# parameters; if they are not given, the search is performed in the current
# directory and in files of any extension, respectively.
#
# encontrarPatron patron [directorio] [extensión]
#
# Requires Python 2.7
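#
# Example invocation (illustrative, not part of the original script):
#   python localizarPatron.py TODO ./src py
# This would report every occurrence of "TODO" in the *.py files under ./src.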
# Modules
import string,os,sys
from subprocess import call
# Constants
# Classes
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------
def encontrar(patron,carpeta,extension): # Function that searches for whatever is passed in from main
    cmd = 'find ' + carpeta + ' -name "*' + extension + '" -print 2> /dev/null' # Command to execute
    for file in os.popen(cmd).readlines(): # For each file found by the command, load its lines
        num = 1
        name = file[:-1] # Store the file name
        if os.path.isdir(name) == False: # Check that the entry being analysed is not a directory
            try: # Exception handling
                for line in open(name).readlines(): # Compare the pattern line by line
                    pos = string.find(line,patron) # Get the position of the searched pattern within the line
                    if pos>=0: # If the pattern was found
                        print "Fichero: ",name,"Línea: ",num,"Posición: ",pos # Print the file name, the line where the pattern was found and the exact position
                        print '....',line[:-1] # Print the matching line
                        print '....', ' '*pos + '*', '\n' # Mark the exact position with an *
                        print '----------------------------------------------------' # Use a line of dashes to delimit the displayed data
                    num+=1 # Increase the line number
            except:
                print "El archivo " + name + " no se puede leer" # Show an error message for files that cannot be opened
    return True
def options():
    print "Uso:\t localizarPatron patron [directorio] [extensión]" # Explanatory message about how to use the program
return
def keyInterrupt():
    print "\nSe ha interrumpido la ejecución del programa" # Message shown when the program execution is interrupted from the keyboard (by the user)
return
# ---------------------------------------------------------------------
def main():
    if len(sys.argv)==1: # If the pattern to search for was not given
        options() # Show a message about how to use the program
    elif len(sys.argv)==2: # If only 2 parameters were passed
        try:
            patron = sys.argv[1] # Get the pattern to search for
            carpeta = "." # Use the current directory for the search
            ext = "" # And leave the extension empty (search in every file)
            encontrar(patron,carpeta,ext) # Call the function, passing the pattern to search for, the directory and the extension
        except KeyboardInterrupt: # Handle the keyboard-interrupt exception
            keyInterrupt() # Call the function to show a message
    elif len(sys.argv)==3: # In case three parameters were given
        try:
            patron = sys.argv[1] # Get the pattern to search for
            carpeta = sys.argv[2] # Get the PATH holding the files in which the search will be performed
            ext = "" # Leave the extension empty to search in every file
            encontrar(patron,carpeta,ext) # Call the function, passing the pattern to search for, the directory and the extension
        except KeyboardInterrupt: # Handle the keyboard-interrupt exception
            keyInterrupt() # Call the function to show a message
    elif len(sys.argv)==4: # In case four parameters were given
        try:
            patron = sys.argv[1] # Get the pattern to search for
            carpeta = sys.argv[2] # Get the PATH holding the files in which the search will be performed
            ext = sys.argv[3] # Get the extension
            ext = "." + ext # Prepend the dot (so the user does not have to type it)
            encontrar(patron,carpeta,ext) # Call the function, passing the pattern to search for, the directory and the extension
        except KeyboardInterrupt: # Handle the keyboard-interrupt exception
            keyInterrupt() # Call the function to show a message
    else: # If more parameters than needed were given
        options() # Show the program "usage" message
if __name__ == "__main__":
main()
| unlicense | -8,836,842,663,022,820,000 | 64.130435 | 189 | 0.54723 | false | 3.530937 | false | false | false |
eponvert/upparse | scripts/chunk-pattern-bench.py | 1 | 1167 | #!/usr/bin/env python
import sys
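# Note (added for clarity, not in the original script): this script appears to
# read one sentence of part-of-speech tags per line on stdin and print a flat
# bracketing, grouping determiner/number + adjective + noun sequences and runs
# of proper nouns (NNP) as chunks, e.g.
#   input : DT JJ NN VBZ NNP NNP
#   output: ( ( DT JJ NN ) VBZ ( NNP NNP ) )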
for line in sys.stdin:
line = line.split()
i = 0
n = len(line)
print '(',
while i < n:
if line[i] == ';':
i += 1
elif line[i] in ('DT','CD'):
start = i
i += 1
while i < n and line[i] == 'JJ':
i += 1
while i < n and line[i] in ('NN','NNS'):
i += 1
if i - start > 1:
j = start
print '(',
for j in range(start,i):
print line[j],
print ')',
# elif line[i] in ('JJ', 'NN','NNS'):
# start = i
# i += 1
#
# if line[i-1] == 'JJ':
# while i < n and line[i] == 'JJ':
# i += 1
#
# while i < n and line[i] in ('NN','NNS'):
# i += 1
#
# if i - start > 1:
# j = start
# print '(',
# for j in range(start,i):
# print line[j],
# print ')',
elif line[i] == 'NNP':
start = i
i += 1
while i < n and line[i] == 'NNP':
i += 1
if i - start > 1:
print '(',
for j in range(start,i):
print line[j],
print ')',
elif i < n:
print line[i],
i += 1
print ')'
| apache-2.0 | -3,052,442,185,311,247,000 | 16.41791 | 47 | 0.379606 | false | 2.969466 | false | false | false |
ndssl/zika_data_to_cdc | src/buzzfeed/clean_parsed_brazil.py | 1 | 4095 | """Clean the Brazil data from BuzzFeed
https://github.com/BuzzFeedNews/zika-data
"""
import os.path
from glob import glob
import re
import pandas as pd
import unicodedata
import numpy as np
def strip_accents(s):
return(''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'))
def load_data(filepath):
df = pd.read_csv(filepath)
return(df)
def get_report_date(filepath,
dir_delim='/',
file_name_position=-1,
date_pattern=r'\d{4}-\d{2}-\d{2}',
date_result_position=0):
file_name = filepath.split(dir_delim)[file_name_position]
return(re.findall(date_pattern, file_name)[date_result_position])
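# Illustrative example (not in the original module; the file name is
# hypothetical): the parsed BuzzFeed file names embed the report date, e.g.
#   >>> get_report_date('data/parsed/brazil/brazil-2016-04-02.csv')
#   '2016-04-02'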
def get_cdc_places_match(df, cdc_places_df, df_col_name, cdc_places_col_name):
match = cdc_places_df[cdc_places_df[
cdc_places_col_name].isin(df[df_col_name])]
return(match)
def get_location(df, cdc_location_df):
location = get_cdc_places_match(df, cdc_location_df,
'state_no_accents', 'state_province')
return(location['location'].reset_index(drop=True))
def get_location_type(df, cdc_location_df):
location_type = get_cdc_places_match(df, cdc_location_df,
'state_no_accents', 'state_province')
return(location_type['location_type'].reset_index(drop=True))
def clean_data(df):
df['state_no_accents'] = df['state'].apply(strip_accents)
df = df.replace(r'\s*[Ss]em registros\s*', np.nan, regex=True)
return df
def get_cdc_data_field_code(cdc_data_guide_df, cdc_str):
return(cdc_data_guide_df[cdc_data_guide_df['data_field'] ==
cdc_str]['data_field_code'].values[0])
def main():
here = os.path.abspath(os.path.dirname(__file__))
recodes = pd.read_csv(os.path.join(
here, '../../data/buzzfeed_recodes.csv'))
cdc_brazil_places_df = pd.read_csv(os.path.join(
here, '../../../zika/Brazil/BR_Places.csv'))
cdc_brazil_data_guide_df = pd.read_csv(os.path.join(
here, '../../../zika/Brazil/BR_Data_Guide.csv'))
cdc_brazil_data_guide_right = cdc_brazil_data_guide_df[
['data_field_code', 'data_field', 'unit', 'time_period_type']]
buzzfeed_brazil_datasets = glob(
'../../../zika-data/data/parsed/brazil/*.csv')
num_data_sets = len(buzzfeed_brazil_datasets)
for i, brazil_dataset in enumerate(buzzfeed_brazil_datasets):
print("Cleaning dataset {} of {}".format(i + 1, num_data_sets))
df = load_data(brazil_dataset)
df = clean_data(df)
report_date = get_report_date(brazil_dataset)
location = get_location(df, cdc_brazil_places_df)
location_type = get_location_type(df, cdc_brazil_places_df)
df['report_date'] = report_date
df['location'] = location
df['location_type'] = location_type
df['time_period'] = 'NA'
melt_value_vars = [c for c in df.columns if re.search(
'^cases|^microcephaly', c)]
df = pd.melt(df, id_vars=[ # 'no', 'state',
'report_date', 'location', 'location_type',
'time_period'],
value_vars=melt_value_vars,
var_name='data_field_original',
value_name='value')
df = pd.merge(df, recodes,
left_on='data_field_original', right_on='buzzfeed')
df = pd.merge(df, cdc_brazil_data_guide_right,
left_on='cdc', right_on='data_field')
# `cases_reported_total` is not a field in the CDC guidelines
# this value was a row sum of the other counts and could always
# be recalculated
df = df.loc[df['data_field'] != 'cases_reported_total']
# clean up before export
df = df.drop(['buzzfeed', 'cdc'], axis=1)
df = df.fillna('NA')
df_file_path = os.path.join(
here, '..', '..', 'output', brazil_dataset.split('/')[-1])
df.to_csv(df_file_path, index=False)
if __name__ == "__main__":
main()
| mit | 7,927,917,207,281,371,000 | 32.024194 | 78 | 0.58486 | false | 3.142748 | false | false | false |
scramblingbalam/Alta_Real | twit_auth.py | 1 | 3762 | # from:
# http://piratefache.ch/twitter-streaming-api-with-tweepy/
# This is just a placeholder; all references to this code in other scripts
# will be changed to twit_auths, but I have kept this in the repo for explanation.
class authentication1:
def __init__(self):
self.consumer_key = "######################################"
self.consumer_secret = "#######################################"
# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
# After the step above, you will be redirected to your app's page.
# Create an access token under the the "Your access token" section
self.access_token = "###########################################"
self.access_token_secret = "#######################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication2:
def __init__(self):
self.consumer_key = "#############################"
self.consumer_secret = "####################################"
self.access_token = "##########################################"
self.access_token_secret = "################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication3:
def __init__(self):
self.consumer_key = "###################################"
self.consumer_secret = "##########################################"
self.access_token = "######################################"
self.access_token_secret = "###############################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication4:
def __init__(self):
self.consumer_key = "######################"
self.consumer_secret = "##################################"
self.access_token = "####################################"
self.access_token_secret = "#######################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication5:
def __init__(self):
self.consumer_key = "########################################"
self.consumer_secret = "#########################################"
self.access_token = "################################################"
self.access_token_secret = "#############################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
| mit | -1,598,235,848,565,647,600 | 42.744186 | 83 | 0.475811 | false | 4.937008 | false | false | false |
hakuliu/inf552 | hw3/fastmapblog.py | 1 | 6167 | #!/usr/bin/env python
# This implements the FastMap algorithm
# for mapping points where only the distance between them is known
# to N-dimension coordinates.
# The FastMap algorithm was published in:
#
# FastMap: a fast algorithm for indexing, data-mining and
# visualization of traditional and multimedia datasets
# by Christos Faloutsos and King-Ip Lin
# http://portal.acm.org/citation.cfm?id=223812
# This code made available under the BSD license,
# details at the bottom of the file
# Copyright (c) 2009, Gunnar Aastrand Grimnes
import math
import random
# need scipy as usual
import scipy
# we will repeat the pick-pivot points heuristic this many times
# a higher value means "better" results, but 1 also works well
DISTANCE_ITERATIONS=1
class FastMap:
def __init__(self, dist,verbose=False):
if dist.max()>1:
dist/=dist.max()
self.dist=dist
self.verbose=verbose
def _furthest(self, o):
mx=-1000000
idx=-1
for i in range(len(self.dist)):
d=self._dist(i,o, self.col)
if d>mx:
mx=d
idx=i
return idx
def _pickPivot(self):
"""Find the two most distant points"""
o1=random.randint(0, len(self.dist)-1)
o2=-1
i=DISTANCE_ITERATIONS
while i>0:
o=self._furthest(o1)
if o==o2: break
o2=o
o=self._furthest(o2)
if o==o1: break
o1=o
i-=1
self.pivots[self.col]=(o1,o2)
return (o1,o2)
def _map(self, K):
if K==0: return
px,py=self._pickPivot()
if self.verbose: print "Picked %d,%d at K=%d"%(px,py,K)
if self._dist(px,py,self.col)==0:
return
for i in range(len(self.dist)):
self.res[i][self.col]=self._x(i, px,py)
self.col+=1
self._map(K-1)
def _x(self,i,x,y):
"""Project the i'th point onto the line defined by x and y"""
dix=self._dist(i,x,self.col)
diy=self._dist(i,y,self.col)
dxy=self._dist(x,y,self.col)
        # FastMap projection: x_i = (d_ix^2 + d_xy^2 - d_iy^2) / (2 * d_xy). Here dix,
        # diy and dxy already hold squared distances, so the divisor is 2*sqrt(dxy)
        # and must be parenthesised (otherwise it multiplies by sqrt(dxy)).
        return (dix + dxy - diy) / (2 * math.sqrt(dxy))
def _dist(self, x,y,k):
"""Recursively compute the distance based on previous projections"""
if k==0: return self.dist[x,y]**2
rec=self._dist(x,y, k-1)
resd=(self.res[x][k] - self.res[y][k])**2
return rec-resd
def map(self, K):
self.col=0
self.res=scipy.zeros((len(self.dist),K))
self.pivots=scipy.zeros((K,2),"i")
self._map(K)
return self.res
def fastmap(dist, K):
"""dist is a NxN distance matrix
returns coordinates for each N in K dimensions
"""
return FastMap(dist,True).map(K)
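# Illustrative usage sketch (added for clarity; not part of the original blog code).
# Given only a symmetric matrix of pairwise distances, recover 2-D coordinates:
def fastmap_usage_example():
    # four corners of a unit square, described purely by their distances
    dist = scipy.array([[0.0,    1.0,    1.4142, 1.0],
                        [1.0,    0.0,    1.0,    1.4142],
                        [1.4142, 1.0,    0.0,    1.0],
                        [1.0,    1.4142, 1.0,    0.0]])
    coords = fastmap(dist, 2)  # one row of 2-D coordinates per input point
    print coords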
# Below here are methods for testing
def vlen(x,y):
return math.sqrt(sum((x-y)**2))
def distmatrix(p, c=vlen):
dist=scipy.zeros((len(p),len(p)))
for x in range(len(p)):
for y in range(x,len(p)):
if x==y: continue
dist[x,y]=c(p[x], p[y])
dist[y,x]=dist[x,y]
return dist
def distortion(d1,d2):
return scipy.sum(((d1/d1.max())-(d2/d2.max()))**2)/d1.size
def distortiontest():
points=[]
n=10
mean=10
dim=5
print "Generating %d %d-D points randomly distributed between [0-%d]"%(n,dim,mean)
for i in range(n):
points.append(scipy.array([random.uniform(0,mean) for x in range(dim)]))
print "Computing distance matrix"
dist=distmatrix(points)
print "Mapping"
p1=fastmap(dist,1)
print "K=1"
print "Distortion: ", distortion(distmatrix(p1),dist)
p2=fastmap(dist,2)
print "K=2"
print "Distortion: ", distortion(distmatrix(p1),dist)
p3=fastmap(dist,3)
print "K=3"
print "Distortion: ", distortion(distmatrix(p3),dist)
import pylab
pylab.scatter([x[0]/mean for x in points], [x[1]/mean for x in points],s=50)
pylab.scatter([x[0] for x in p2], [x[1] for x in p2], c="r")
pylab.show()
def stringtest():
import Levenshtein
strings=[ "acting", "activist", "compute", "coward", "forward", "interaction", "activity", "odor", "order", "international" ]
dist=distmatrix(strings, c=lambda x,y: 1-Levenshtein.ratio(x,y))
p=fastmap(dist,2)
import pylab
pylab.scatter([x[0] for x in p], [x[1] for x in p], c="r")
for i,s in enumerate(strings):
pylab.annotate(s,p[i])
pylab.title("Levenshtein distance mapped to 2D coordinates")
pylab.show()
if __name__=='__main__':
stringtest()
#distortiontest()
# Copyright (c) 2009, Gunnar Aastrand Grimnes
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| apache-2.0 | 6,542,593,122,165,147,000 | 29.98995 | 757 | 0.632398 | false | 3.435655 | false | false | false |
geosolutions-it/cread-workflows | publishMosaicGranule.py | 1 | 1830 | import requests
import json
# The data entry form
##Geoserver_URL=string http://host[:port]/geoserver
# test -> ##Geoserver_URL=string http://192.168.50.171:8080/geoserver
##Username=string your_username
##Password=string your_password
##Image_mosaic_Store_Name=string a datastore name
# ##Image_mosaic_Store_Name=string countryMosaic
##Mosaic_Granule_to_add=vector
usr=Username
pswd=Password
geoserver_url=Geoserver_URL
store_name=Image_mosaic_Store_Name
granule_abs_path=Mosaic_Granule_to_add
base_rest_path="/rest/imports"
print "STEP 1 - Creating a new importer for the datastore: '" + store_name + "'..."
headers = {'content-type': 'application/json'}
jsonRunImporter = '{"import": {"targetWorkspace": {"workspace": {"name": "geonode"}},"targetStore": {"dataStore": {"name": "' + str(store_name) + '"}}}}'
print(jsonRunImporter)
url = geoserver_url + base_rest_path
r = requests.post(url, jsonRunImporter, auth=(usr, pswd), headers=headers)
print(r.text)
data = json.loads(r.text)
importerId = data["import"]["id"]
print "...importer successfuly created! importerId:'" + str(importerId) + "'"
print ""
print "STEP 2 - Going to load from filesystem the geotif to upload..."
upload = {'files': ('country.tiff', open(granule_abs_path, "rb"), 'application/octet-stream')}
print "...geotif successfuly loaded! ready to create a run a task for the importer " + str(importerId) + "..."
url += "/" + str(importerId) + "/tasks"
r = requests.post(url, files=upload, auth=(usr, pswd))
print "...task created! taskId: '" + ""
print ""
data = json.loads(r.text)
taskId = data["task"]["id"]
print "STEP 3 - Importer: '" + str(importerId) +"' run taskId: '" + str(taskId) + "'"
url = url = geoserver_url + base_rest_path + "/" + str(importerId)
print str(url)
r = requests.post(url, auth=(usr, pswd))
print "task started!"
| apache-2.0 | -168,081,882,520,736,580 | 32.272727 | 153 | 0.691257 | false | 3.05 | false | false | false |
FroggiesareFluffy/ForestOfSquirrels | forestofsquirrels/squirrels/squirrel.py | 1 | 9147 | import math
import pygame
import forestofsquirrels.trees
pygame.init()
class SpeechBubble(pygame.sprite.Sprite):
font = pygame.font.Font("freesansbold.ttf", 20)
topleft = pygame.image.load("forestofsquirrels/graphics/corner.png")
topright = pygame.transform.flip(topleft, True, False)
bottomleft = pygame.transform.flip(topleft, False, True)
bottomright = pygame.transform.flip(topleft, True, True)
def __init__(self, forest, squirrel, message):
pygame.sprite.Sprite.__init__(self, forest)
self.message = message
self.squirrel = squirrel
image = self.font.render(self.message, False, (0, 0, 0), (255, 255, 255))
self.rect = image.get_rect().inflate(4, 4)
self.image = pygame.Surface((self.rect.width, self.rect.height))
self.image.blit(image, (2, 2))
self.image.blit(self.topleft, (0, 0))
self.image.blit(self.topright, (self.rect.width - 4, 0))
if self.squirrel.image == self.squirrel.leftimg:
self.rect.bottomright = self.squirrel.rect.topleft
self.image.blit(self.bottomleft, (0, self.rect.height - 4))
else:
self.rect.bottomleft = self.squirrel.rect.topright
self.image.blit(self.bottomright, (self.rect.width - 4, self.rect.height - 4))
self.updated = 0
self.y = self.squirrel.y
def update(self):
self.y = self.squirrel.y
image = self.font.render(self.message, False, (0, 0, 0), (255, 255, 255))
self.rect = image.get_rect().inflate(4, 4)
self.image = pygame.Surface((self.rect.width, self.rect.height))
self.image.blit(image, (2, 2))
self.image.blit(self.topleft, (0, 0))
self.image.blit(self.topright, (self.rect.width - 4, 0))
if self.squirrel.image == self.squirrel.leftimg:
self.rect.bottomright = self.squirrel.rect.topleft
self.image.blit(self.bottomleft, (0, self.rect.height - 4))
else:
self.rect.bottomleft = self.squirrel.rect.topright
self.image.blit(self.bottomright, (self.rect.width - 4, self.rect.height - 4))
self.updated += 1
if self.updated > 60:
self.kill()
class Squirrel(pygame.sprite.Sprite):
""" Base class for squirrels.
"""
def __init__(self, forest, x, y):
self.x = x
self.y = y
self.z = 0
self.xoffset = 0
self.yoffset = 0
self.climbing = None
self.hoppingLeft = False
self.hoppingRight = False
self.hoppingUp = False
self.hoppingDown = False
self.goingLeft = False
self.goingRight = False
self.goingUp = False
self.goingDown = False
self.hopstep = -1
self.leftimg = pygame.image.load("forestofsquirrels/graphics/squirrel.png").convert_alpha()
self.rightimg = pygame.transform.flip(self.leftimg, True, False)
self.leftrunimg = pygame.image.load("forestofsquirrels/graphics/runningsquirrel.png").convert_alpha()
self.rightrunimg = pygame.transform.flip(self.leftrunimg, True, False)
self.image = self.leftimg
self.rect = self.image.get_rect()
self.colliderect = self.rect
self.level = 0
pygame.sprite.Sprite.__init__(self, forest)
self.forest = forest
self.can_climb = None
self.acorn = False
self.health = 8
def startright(self):
self.goingRight = True
self.goingLeft = False
self.image = self.rightrunimg
self.hopstep = max(self.hopstep, 0)
def stopright(self):
self.goingRight = False
def startleft(self):
self.goingLeft = True
self.goingRight = False
self.image = self.leftrunimg
self.hopstep = max(self.hopstep, 0)
def stopleft(self):
self.goingLeft = False
def startup(self):
self.goingUp = True
self.goingDown = False
self.hopstep = max(self.hopstep, 0)
def stopup(self):
self.goingUp = False
def startdown(self):
self.goingDown = True
self.goingUp = False
self.hopstep = max(self.hopstep, 0)
def stopdown(self):
self.goingDown = False
def start_climbing(self):
self.climbing = self.can_climb
def stop_climbing(self):
if self.z < 10:
self.climbing = None
def say(self, message):
SpeechBubble(self.forest, self, message)
def on_space(self, window, clock):
if self.climbing:
for hole in self.climbing[0].holes:
if hole[0] == self.climbing[1] and hole[1] < self.z < hole[2]:
area = __import__("forestofsquirrels.world.rooms." + hole[3], fromlist=["main"])
area.main(self, window, clock)
return True
if self.z == self.climbing[0].maxheight:
self.acorn = True
return False
def update(self):
if not self.climbing:
self.yoffset = 0
if self.hopstep >= 0:
if self.hopstep == 0:
self.hoppingLeft = self.goingLeft
self.hoppingRight = self.goingRight
self.hoppingDown = self.goingDown
self.hoppingUp = self.goingUp
self.hopstep += 1
self.z = math.sin(self.hopstep * math.pi / 10) * 10
if self.hopstep == 10:
if self.goingRight or self.goingLeft or self.goingUp or self.goingDown:
self.hopstep = 0
else:
if self.hoppingLeft:
self.image = self.leftimg
elif self.hoppingRight:
self.image = self.rightimg
self.hopstep = -1
self.hoppingLeft = False
self.hoppingRight = False
self.hoppingUp = False
self.hoppingDown = False
if self.hoppingRight:
self.x += 3
elif self.hoppingLeft:
self.x -= 3
if self.hoppingUp:
self.y -= 2
elif self.hoppingDown:
self.y += 2
self.colliderect = pygame.Rect(self.x, self.y, 18, 18)
self.can_climb = None
for tree in filter(lambda s: isinstance(s, forestofsquirrels.trees.Tree), self.forest.sprites()):
if tree.colliderect.colliderect(self.colliderect):
overlap = self.colliderect.union(tree.colliderect)
xoffset, yoffset = overlap.width, overlap.height
if self.hoppingDown and self.colliderect.bottom < tree.colliderect.bottom and (
xoffset > yoffset or not (self.hoppingLeft or self.hoppingRight)):
self.colliderect.bottom = tree.colliderect.top
elif self.hoppingUp and self.colliderect.top > tree.colliderect.top and (
xoffset > yoffset or not (self.hoppingLeft or self.hoppingRight)):
self.colliderect.top = tree.colliderect.bottom
elif self.hoppingLeft and (xoffset < yoffset or not (self.hoppingUp or self.hoppingDown)):
self.colliderect.left = tree.colliderect.right
self.climbing = [tree, "right"]
elif self.hoppingRight and (xoffset < yoffset or not (self.hoppingUp or self.hoppingDown)):
self.colliderect.right = tree.colliderect.left
self.climbing = [tree, "left"]
self.x, self.y = self.colliderect.topleft
else:
if self.goingRight:
if self.climbing[1] == "right":
self.z -= 2
self.image = pygame.transform.rotate(self.rightrunimg, -90)
if self.z <= 18:
self.climbing = None
self.image = self.rightrunimg
else:
if self.z < self.climbing[0].maxheight:
self.z += 2
else:
self.z = self.climbing[0].maxheight
self.image = pygame.transform.rotate(self.rightrunimg, 90)
elif self.goingLeft:
if self.climbing[1] == "right":
if self.z < self.climbing[0].maxheight:
self.z += 2
else:
self.z = self.climbing[0].maxheight
self.image = pygame.transform.rotate(self.leftrunimg, -90)
else:
self.z -= 2
self.image = pygame.transform.rotate(self.leftrunimg, 90)
if self.z <= 18:
self.climbing = None
self.image = self.leftrunimg
self.yoffset = -self.z
self.rect = pygame.Rect(self.x + self.xoffset, self.y + self.yoffset, 18, 18)
| unlicense | -4,934,933,280,678,695,000 | 40.202703 | 111 | 0.545971 | false | 3.722833 | false | false | false |
tomviner/micro-bit-examples | servo4.py | 1 | 4885 | import microbit
def rescale(src_scale, dest_scale, x):
"""Map one number scale to another
For example, to convert a score of 4 stars out of 5 into a percentage:
>>> rescale((0, 5), (0, 100), 4)
80.0
Great for mapping different input values into LED pixel brightnesses!
"""
src_start, src_end = src_scale
# what proportion along src_scale x is:
proportion = 1.0 * (x - src_start) / (src_end - src_start)
dest_start, dest_end = dest_scale
# apply our proportion to the dest_scale
return proportion * (dest_end - dest_start) + dest_start
UNKNOWN = type('_', (), {'__str__': lambda _: 'UNKNOWN'})
UNKNOWN_ANGLE = UNKNOWN()
DUTY_0PC = 0
DUTY_100PC = 1023
class Servo:
"""
Futaba S3003 - Servo
Control System: +Pulse Width Control 1520usec Neutral
Required Pulse: 3-5 Volt Peak to Peak Square Wave
Operating Voltage: 4.8-6.0 Volts
Operating Temperature Range: -20 to +60 Degree C
Operating Speed (4.8V): 0.23sec/60 degrees at no load
Operating Speed (6.0V): 0.19sec/60 degrees at no load
Stall Torque (4.8V): 44 oz/in. (3.2kg.cm)
Stall Torque (6.0V): 56.8 oz/in. (4.1kg.cm)
Operating Angle: 45 Deg. one side pulse traveling 400usec
Continuous Rotation Modifiable: Yes
Direction: Counter Clockwise/Pulse Traveling 1520-1900usec
"""
PERIOD = 20000 # microseconds
pin = microbit.pin0
min_pulse_width = 500 # microseconds
mid_pulse_width = 1520
max_pulse_width = 3000
min_deg = 0 # degrees
mid_deg = 90
max_deg = 180
max_on_time = 1200 # milliseconds
clockwise_speed_factor = 0.85
def __init__(self):
print('Initialise PWM to {} μs {:.0f} ms {:.0f} Hz'.format(
self.PERIOD, self.PERIOD/1000, 1000000./self.PERIOD))
self.pin.set_analog_period_microseconds(self.PERIOD)
self.angle = UNKNOWN_ANGLE
self.point(self.mid_deg)
def deg_to_pulse_width(self, deg):
return rescale(
(self.min_deg, self.max_deg),
(self.max_pulse_width, self.min_pulse_width),
deg
)
def pulse_width_to_duty_cycle_value(self, pulse_width):
return rescale(
(0, self.PERIOD),
(DUTY_0PC, DUTY_100PC),
pulse_width
)
def deg_to_duty_cycle_value(self, deg):
pulse_width = self.deg_to_pulse_width(deg)
assert self.min_pulse_width <= pulse_width <= self.max_pulse_width
print('\tpulse width {:.0f} μs'.format(pulse_width))
duty_cycle_value = self.pulse_width_to_duty_cycle_value(pulse_width)
percent = rescale((0, DUTY_100PC), (0, 100), duty_cycle_value)
print('\tduty cycle {:.0f}/{} ({:.1f}%)'.format(
duty_cycle_value, DUTY_100PC, percent))
return duty_cycle_value
def calc_on_time(self, deg):
"""
Operating Speed (4.8V): 0.23sec/60 degrees at no load
ms_per_deg = 230 / 60.
"""
# from observations:
ms_per_deg = 600 / 90.
if self.angle is UNKNOWN_ANGLE:
return self.max_on_time / 2.
is_clockwise = self.angle < deg
travel = abs(deg - self.angle)
on_time = travel * ms_per_deg
if is_clockwise:
on_time *= self.clockwise_speed_factor
assert on_time <= self.max_on_time
return on_time
def wait_and_display_pwm(self, duty_cycle_value, on_time):
start = microbit.running_time()
width = round(on_time / 15)
hits = range(0, width, round(DUTY_100PC / duty_cycle_value))
points = [('#' if i in hits else '.') for i in range(width)]
while True:
microbit.sleep(1)
duration = microbit.running_time() - start
progress_left = 1 - (duration / on_time)
points_left = int((width * progress_left)) + 1
while points and len(points) > points_left:
point = points.pop(0)
print(point, end='', flush=True)
if duration >= on_time:
break
print()
def pulse_burst(self, duty_cycle_value, on_time):
try:
microbit.pin0.write_analog(duty_cycle_value)
self.wait_and_display_pwm(duty_cycle_value, on_time)
finally:
# ensure we don't leave the pwm on
microbit.pin0.write_analog(0)
def point(self, deg):
print('point {}° to {}°'.format(self.angle, deg))
duty_cycle_value = self.deg_to_duty_cycle_value(deg)
on_time = self.calc_on_time(deg)
print('\ton for {:.0f} ms'.format(on_time))
self.angle = deg
self.pulse_burst(duty_cycle_value, on_time)
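# Worked example (illustrative, added for clarity): pointing the servo to 90 degrees
# with the values above gives
#   deg_to_pulse_width(90)                 -> rescale((0, 180), (3000, 500), 90)   = 1750 us
#   pulse_width_to_duty_cycle_value(1750)  -> rescale((0, 20000), (0, 1023), 1750) ~= 89.5
# i.e. a duty value of about 89.5/1023, roughly an 8.75% duty cycle of the 20 ms period.
# (Note the degree scale maps 0 deg to the widest pulse, so 90 deg lands at 1750 us,
# not at the 1520 us neutral point.)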
pause_time = 5
def demo():
servo = Servo()
for deg in 0, 180, 90, 180, 0:
servo.point(deg)
microbit.sleep(pause_time)
if __name__ == '__main__':
demo()
| apache-2.0 | 7,517,846,798,904,603,000 | 30.490323 | 76 | 0.584921 | false | 3.226041 | false | false | false |
Remper/learningbyreading | src/disambiguation.py | 1 | 3636 | from babelfy import babelfy
from ukb import wsd
from candc import postag
from spotlight import spotlight
import ConfigParser
import logging as log
from mappings import bn2dbpedia, offset2bn, bn2offset
from os.path import join, dirname
# read configuration
config = ConfigParser.ConfigParser()
config.read(join(dirname(__file__),'../config/disambiguation.conf'))
config_mapping = ConfigParser.ConfigParser()
config_mapping.read(join(dirname(__file__),'../config/mapping.conf'))
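# Illustrative layout of the two config files read above (assumed for clarity; the
# actual .conf contents are not shown in this module). The keys used below are:
#   disambiguation.conf:  [wsd] module = babelfy | ukb
#                         [el]  module = babelfy | spotlight | none
#   mapping.conf:         [net] module = wordnet | babelnet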
def disambiguation(tokenized, drs):
# Word Sense Disambiguation
entities = []
if config.get('wsd', 'module') == 'babelfy':
log.info("Calling Babelfy")
disambiguated = babelfy(tokenized)
synsets = disambiguated['synsets']
if config_mapping.get('net', 'module') == 'wordnet':
synsets = babelfy_to_wordnet(synsets)
if config.get('el', 'module') == 'babelfy':
log.info("Using Babelfy also for entities")
if(disambiguated != None):
entities = disambiguated['entities']
elif config.get('wsd', 'module') == 'ukb':
log.info("Calling POS-tagger")
postags = postag(tokenized)
log.info("Calling UKB")
disambiguated = wsd(postags)
synsets = disambiguated['synsets']
if config_mapping.get('net', 'module') == 'babelnet':
synsets = ubk_to_babelnet(synsets)
# Entity Linking
if config.get('el', 'module') == 'babelfy' and config.get('wsd', 'module') != 'babelfy':
log.info("Calling Babelfy")
disambiguated = babelfy(tokenized)
if(disambiguated != None):
entities = disambiguated['entities']
elif config.get('el', 'module') == 'spotlight':
log.info("Calling Spotlight")
disambiguated = spotlight(tokenized)
if not disambiguated:
return None, None
if(disambiguated != None):
entities = disambiguated['entities']
elif config.get('el', 'module') == 'none':
log.info("No module selected for entity linking")
entities = []
# enriching the entity list with WordNet mapping
'''
for synset in synsets:
offset = synset['synset'].split('/')[-1]
if offset in offset2bn:
bn = offset2bn[offset]
if bn in bn2dbpedia:
entity = bn2dbpedia[bn]
if entity != '-NA-':
uri = u'http://dbpedia.org/resource/{0}'.format(entity)
if not uri in [e['entity'] for e in entities]:
entities.append({'token_start': synset['token_start'],
'token_end': synset['token_end'],
'entity': uri})
'''
return synsets, entities
def babelfy_to_wordnet(synsets):
try:
for synset in synsets:
bn_id = synset['synset'].split('/')[-1]
if bn_id in bn2offset:
synset['synset'] = 'http://wordnet-rdf.princeton.edu/wn31/{0}'.format(bn2offset[bn_id])
else:
synset['synset'] = ''
except:
log.error("babelfy(): error linking to WordNet output")
return None
return synsets
def ubk_to_babelnet(synsets):
try:
for synset in synsets:
wn_id = synset['synset'].split('/')[-1]
if wn_id in offset2bn:
synset['synset'] = 'http://babelnet.org/rdf/{0}'.format(offset2bn[wn_id])
else:
synset['synset'] = ''
except:
log.error("UBK(): error linking to BabelNet output")
return None
return synsets
| gpl-2.0 | 8,635,342,172,460,265,000 | 35.727273 | 103 | 0.575358 | false | 3.672727 | true | false | false |
not-napoleon/mazes | braid.py | 1 | 1536 | import random
import sys
import copy
from utils import Point, Box
from walled_matrix import WalledMatrix
from growing_tree import GrowingTreeMaze
class BraidMaze(object):
"""Convert a perfect maze into a braided maze"""
def __init__(self, x_max, y_max):
self._x_max = x_max
self._y_max = y_max
gt = GrowingTreeMaze(x_max, y_max)
gt.generate()
self._matrix = gt.matrix
def generate(self):
"""Remove dead ends from the maze
"""
print self._matrix
for point, cell in self._matrix:
walls = zip(('U', 'L', 'D', 'R'), cell)
blocked = [x for x in walls if not x[1]]
if len(blocked) < 3:
# we have more than one exit, this isn't a dead end and we
# don't need to do anything
continue
print "***"
print "%s: %s" % (blocked, len(blocked))
random.shuffle(blocked)
while(blocked):
try:
self._matrix.carve(point, blocked.pop()[0])
except IndexError:
continue
break
def __str__(self):
return str(self._matrix)
def main():
"""Driver function
"""
if len(sys.argv) == 2:
seed = int(sys.argv[1])
else:
seed = random.randint(0, sys.maxint)
print "Seeding with %s" % seed
random.seed(seed)
maze = BraidMaze(50, 50)
maze.generate()
print maze
if __name__ == '__main__':
main()
| mit | 3,617,249,657,225,920,000 | 25.482759 | 74 | 0.520182 | false | 3.737226 | false | false | false |
wolffytom/tf_runet | train.py | 1 | 2824 | #!/usr/bin/env python3
import os, sys
sys.path.append( os.path.dirname(__file__ ) )
from data.vot2016 import VOT2016_Data_Provider
from config import cfg
from model import Model
import sklearn
import tensorflow as tf
import numpy as np
from PIL import Image
from meval import calc_auc, print_step_auc
import text_histogram
def ave_weight(gtdata):
gtdata_flat = gtdata.reshape((-1))
weight = np.zeros(shape=gtdata_flat.shape)
ones = np.sum(gtdata)
length = len(gtdata_flat)
zeros = length-ones
zeros_weight = ones/zeros
for i in range(len(gtdata_flat)):
if gtdata_flat[i] > 0.5:
weight[i] = 1.
else:
weight[i] = zeros_weight
weight = weight.reshape(gtdata.shape)
return weight
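# Worked example (illustrative): for a ground-truth mask with 2 positive and 8
# negative pixels, zeros_weight = 2/8 = 0.25, so each positive pixel gets weight 1.0
# and each negative 0.25 -- both classes then contribute the same total weight (2.0).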
def train(model_path = None,
save_path = '/home/cjl/tf_runet/models/20180612',
pro_path = '/home/cjl/tf_runet',
max_size = None,
total_step = 0,
display = False,
displaystep = 30,
save = True,
dataidx = 10):
print('begin_train')
data_path = pro_path + '/data/vot2016'
data_provider = VOT2016_Data_Provider(data_path, cfg)
data_provider.random_batch_init()
data_provider.dataidx = dataidx
model = Model()
train_writer = tf.summary.FileWriter(save_path, model.sess.graph)
if model_path is None:
model.init_vars_random()
else:
model.restore(model_path)
import psutil
training = True
while training:
total_step += 1
print('--------------------------------------')
print('total_step:', total_step)
iptdata, gtdata = data_provider.get_a_random_batch(jump=2)
weight = ave_weight(gtdata)
summary, cost, otherlabels, predict = model.train(iptdata, gtdata, weight)
#text_histogram.histogram(list(iptdata.astype(float).reshape((-1))))
#text_histogram.histogram(list(gtdata.astype(float).reshape((-1))))
auc = calc_auc(predict, otherlabels)
print("cost:", cost, " auc:" , auc)
print_step_auc(predict, otherlabels)
text_histogram.histogram(list(predict.astype(float).reshape((-1))))
train_writer.add_summary(summary, total_step)
if (save and total_step % 20 == 0):
filename = save_path + '/train' + str(total_step)
model.save(filename)
print('========================================')
if __name__ == '__main__':
#train()
#train('/home/cjl/models/20171127/train200')
#newclass()
#predict('/home/cjl/models/20171201/train150')
scripts_path = os.path.split( os.path.realpath( sys.argv[0] ) )[0]
train(
pro_path = scripts_path,
model_path = None,
save_path = scripts_path + '/models/0402_all',
max_size = (300,300),
dataidx = 10)
| gpl-3.0 | 5,646,010,547,158,332,000 | 31.090909 | 82 | 0.595609 | false | 3.456548 | false | false | false |
amolenaar/gaphas | gaphas/tool/zoom.py | 1 | 1543 | from gi.repository import Gtk
from gaphas.view import GtkView
class ZoomData:
x0: int
y0: int
sx: float
sy: float
def zoom_tool(view: GtkView) -> Gtk.GestureZoom:
"""Create a zoom tool as a Gtk.Gesture.
Note: we need to keep a reference to this gesture, or else it will be destroyed.
"""
zoom_data = ZoomData()
gesture = (
Gtk.GestureZoom.new(view)
if Gtk.get_major_version() == 3
else Gtk.GestureZoom.new()
)
gesture.set_propagation_phase(Gtk.PropagationPhase.CAPTURE)
gesture.connect("begin", on_begin, zoom_data)
gesture.connect("scale-changed", on_scale_changed, zoom_data)
return gesture
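# Illustrative usage (added; not part of the original module): the view does not keep
# the gesture alive, so the caller must hold a reference to it, e.g.
#
#     self._zoom_gesture = zoom_tool(view)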
def on_begin(
gesture: Gtk.GestureZoom,
sequence: None,
zoom_data: ZoomData,
) -> None:
_, zoom_data.x0, zoom_data.y0 = gesture.get_point(sequence)
view = gesture.get_widget()
zoom_data.sx = view.matrix[0]
zoom_data.sy = view.matrix[3]
def on_scale_changed(
gesture: Gtk.GestureZoom, scale: float, zoom_data: ZoomData
) -> None:
if zoom_data.sx * scale < 0.2:
scale = 0.2 / zoom_data.sx
elif zoom_data.sx * scale > 20.0:
scale = 20.0 / zoom_data.sx
view = gesture.get_widget()
m = view.matrix
sx = m[0]
sy = m[3]
ox = (m[4] - zoom_data.x0) / sx
oy = (m[5] - zoom_data.y0) / sy
dsx = zoom_data.sx * scale / sx
dsy = zoom_data.sy * scale / sy
m.translate(-ox, -oy)
m.scale(dsx, dsy)
m.translate(+ox, +oy)
view.request_update((), view.model.get_all_items())
| lgpl-2.1 | 2,310,033,566,825,498,000 | 24.716667 | 84 | 0.614388 | false | 2.805455 | false | false | false |
sivel/turquoise | turquoise.py | 1 | 4622 | #!/usr/bin/env python
import re
import bson
import logging
from functools import wraps
from happymongo import HapPyMongo
from flask.ext.github import GitHub
from flask import (Flask, session, g, request, url_for, redirect, flash,
render_template, abort)
try:
from cPickle import dumps as pickle_dumps
from cPickle import loads as pickle_loads
except ImportError:
from pickle import dumps as pickle_dumps
from pickle import loads as pickle_loads
app = Flask('turquoise')
app.config.from_envvar('TURQUOISE_CONFIG')
github = GitHub(app)
mongo, db = HapPyMongo(app)
@app.template_filter()
def re_pattern(value):
try:
return pickle_loads(value.encode('utf-8')).pattern
except:
return value
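# Illustrative round trip (added for clarity): the /profile/regex route below stores
# pickle_dumps(re.compile(pattern)) on the user document, and this filter recovers
# the original pattern string for display in templates, e.g.
#     pickle_loads(pickle_dumps(re.compile('devel'))).pattern == 'devel'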
@github.access_token_getter
def token_getter():
user = g.user
if user is not None:
return user['github_access_token']
def login_required(f):
@wraps(f)
def wrapped(*args, **kwargs):
user_id = session.get('user_id')
if not user_id:
return redirect(url_for('login'))
return f(*args, **kwargs)
return wrapped
@app.before_first_request
def logger():
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.errorhandler(500)
def internal_server_error(e):
app.logger.exception(e)
return abort(500)
@app.route('/')
def index():
return render_template('index.html', repos=app.config['GITHUB_REPOS'])
@app.route('/login')
def login():
return github.authorize(scope='user:email')
@app.route('/login/authorized')
@github.authorized_handler
def authorized(oauth_token):
if oauth_token is None:
flash('Authorization failed.', 'danger')
        return redirect(url_for('index'))
g.user = db.users.find_one({'github_access_token': oauth_token})
if not g.user:
g.user = {
'github_access_token': oauth_token,
'regex': '',
'files': [],
'notified': {},
'extra_contact': '',
'self_notify': False,
}
details = github.get('user')
existing = db.users.find_one({'login': details['login']})
if not existing:
g.user.update(details)
g.user['_id'] = db.users.insert(g.user, manipulate=True)
else:
existing['github_access_token'] = oauth_token
existing.update(details)
db.users.update({'_id': existing['_id']},
{'$set': existing})
g.user = existing
else:
details = github.get('user')
g.user.update(details)
db.users.update({'_id': bson.ObjectId(g.user['_id'])},
{'$set': details})
session['user_id'] = str(g.user['_id'])
return redirect(url_for('profile'))
@app.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = db.users.find_one({'_id': bson.ObjectId(session['user_id'])})
@app.route('/profile')
@login_required
def profile():
return render_template('profile.html', repos=app.config['GITHUB_REPOS'])
@app.route('/profile/contact', methods=['POST'])
@login_required
def contact():
partial = {
'extra_contact': request.form.get('contact'),
'self_notify': bool(request.form.get('self_notify'))
}
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$set': partial})
return redirect(url_for('profile'))
@app.route('/profile/file/add/<path:filename>')
@login_required
def file_add(filename):
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$push': {'files': filename}})
return redirect(url_for('profile'))
@app.route('/profile/file/delete/<path:filename>')
@login_required
def file_delete(filename):
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$pull': {'files': filename}})
return redirect(url_for('profile'))
@app.route('/profile/regex', methods=['POST'])
@login_required
def regex():
try:
compiled = re.compile(request.form.get('regex'))
except re.error as e:
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$set': {'regex': request.form.get('regex')}})
flash('Invalid regular expression: %s' % e, 'danger')
else:
pickled = pickle_dumps(compiled)
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$set': {'regex': pickled}})
return redirect(url_for('profile'))
if __name__ == '__main__':
app.run('0.0.0.0', 5000, debug=True)
| apache-2.0 | -1,303,330,272,783,548,400 | 26.02924 | 78 | 0.598875 | false | 3.582946 | false | false | false |
mentii/mentii | Backend/mentii/user_ctrl.py | 1 | 14654 | import boto3
import re
from flask import render_template
from flask_mail import Message
from boto3.dynamodb.conditions import Key, Attr
from utils.ResponseCreation import ControllerResponse
from utils import db_utils as dbUtils
import utils.MentiiLogging as MentiiLogging
import uuid
import hashlib
import class_ctrl as class_ctrl
from flask import g
def sendForgotPasswordEmail(httpOrigin, jsonData, mailer, dbInstance):
email = jsonData.get('email', None)
resetPasswordId = str(uuid.uuid4())
success = addResetPasswordIdToUser(email, resetPasswordId, dbInstance)
if success == True:
host = getProperEnvironment(httpOrigin)
url = host + '/reset-password/{0}'.format(resetPasswordId)
message = render_template('forgotPasswordEmail.html', url=url)
#Build Message
msg = Message('Mentii: Reset Password', recipients=[email], extra_headers={'Content-Transfer-Encoding': 'quoted-printable'}, html=message)
#Send Email
mailer.send(msg)
def addResetPasswordIdToUser(email, resetPasswordId, dbInstance):
success = False;
table = dbUtils.getTable('users', dbInstance)
if table is not None:
user = getUserByEmail(email,dbInstance)
if user is not None:
jsonData = {
'Key': {'email': email},
'UpdateExpression': 'SET resetPasswordId = :a',
'ExpressionAttributeValues': { ':a': resetPasswordId },
'ReturnValues' : 'UPDATED_NEW'
}
dbUtils.updateItem(jsonData, table)
success = True
return success
def resetUserPassword(jsonData, dbInstance):
response = ControllerResponse()
email = jsonData.get('email', None)
password = jsonData.get('password', None)
resetPasswordId = jsonData.get('id', None)
if email is not None and password is not None and resetPasswordId is not None:
res = updatePasswordForEmailAndResetId(email, password, resetPasswordId, dbInstance)
if res is not None:
response.addToPayload('status', 'Success')
else:
response.addError('Failed to Reset Password', 'We were unable to update the password for this account.')
else:
response.addError('Failed to Reset Password', 'We were unable to update the password for this account.')
return response
def updatePasswordForEmailAndResetId(email, password, resetPasswordId, dbInstance):
res = None
user = getUserByEmail(email, dbInstance)
if user is not None:
storedResetPasswordId = user.get('resetPasswordId', None)
if storedResetPasswordId == resetPasswordId:
table = dbUtils.getTable('users', dbInstance)
if table is not None:
hashedPassword = hashPassword(password)
jsonData = {
'Key': {'email': email},
'UpdateExpression': 'SET password = :a REMOVE resetPasswordId',
'ExpressionAttributeValues': { ':a': hashedPassword },
'ReturnValues' : 'UPDATED_NEW'
}
res = dbUtils.updateItem(jsonData, table)
return res
def getProperEnvironment(httpOrigin):
host = ''
if httpOrigin.find('stapp') != -1:
host = 'http://stapp.mentii.me'
elif httpOrigin.find('app') != -1:
host = 'http://app.mentii.me'
else:
host = 'http://localhost:3000'
return host
def register(httpOrigin, jsonData, mailer, dbInstance):
response = ControllerResponse()
if not validateRegistrationJSON(jsonData):
response.addError('Register Validation Error', 'The json data did not have an email or did not have a password')
else:
email = parseEmail(jsonData)
password = parsePassword(jsonData)
if not isEmailValid(email):
response.addError('Email invalid', 'The email is invalid')
if not isPasswordValid(password):
response.addError('Password Invalid', 'The password is invalid')
if isEmailInSystem(email, dbInstance) and isUserActive(getUserByEmail(email, dbInstance)):
response.addError('Registration Failed', 'We were unable to register this user')
if not response.hasErrors():
hashedPassword = hashPassword(parsePassword(jsonData))
activationId = addUserAndSendEmail(httpOrigin, email, hashedPassword, mailer, dbInstance)
if activationId is None:
response.addError('Activation Id is None', 'Could not create an activation Id')
return response
def hashPassword(password):
return hashlib.md5( password ).hexdigest()
def validateRegistrationJSON(jsonData):
'''
  Validate that the JSON object contains
  email and password attributes.
'''
if jsonData is not None:
return 'password' in jsonData.keys() and 'email' in jsonData.keys()
return False
def parseEmail(jsonData):
try:
email = jsonData['email']
return email
except Exception as e:
MentiiLogging.getLogger().exception(e)
return None
def parsePassword(jsonData):
try:
password = jsonData['password']
return password
except Exception as e:
MentiiLogging.getLogger().exception(e)
return None
def isEmailValid(email):
'''
  Validate that the email matches the
  required format.
'''
emailRegex = re.compile(r"[^@]+@[^@]+\.[^@]+")
return emailRegex.match(email) is not None
def isPasswordValid(password):
return len(password) >= 8
def addUserAndSendEmail(httpOrigin, email, password, mailer, dbInstance):
activationId = str(uuid.uuid4())
table = dbUtils.getTable('users', dbInstance)
jsonData = {
'email': email,
'password': password,
'activationId': activationId,
'active': 'F',
'userRole' : "student"
}
if table is None:
MentiiLogging.getLogger().error('Unable to get table users in addUserAndSendEmail')
activationId = None
#This will change an existing user with the same email.
response = dbUtils.putItem(jsonData,table)
if response is None:
MentiiLogging.getLogger().error('Unable to add user to table users in addUserAndSendEmail')
activationId = None
try:
sendEmail(httpOrigin, email, activationId, mailer)
except Exception as e:
MentiiLogging.getLogger().exception(e)
return activationId
def deleteUser(email, dbInstance):
table = dbUtils.getTable('users', dbInstance)
key = {'email': email}
response = dbUtils.deleteItem(key, table)
return response
def sendEmail(httpOrigin, email, activationId, mailer):
'''
Create a message and send it from our email to
the passed in email. The message should contain
a link built with the activationId
'''
if activationId is None:
return
#Change the URL to the appropriate environment
host = getProperEnvironment(httpOrigin)
url = host + '/activation/{0}'.format(activationId)
message = render_template('registrationEmail.html', url=url)
#Build Message
msg = Message('Mentii: Thank You for Creating an Account!', recipients=[email],
extra_headers={'Content-Transfer-Encoding': 'quoted-printable'}, html=message)
#Send Email
mailer.send(msg)
def isEmailInSystem(email, dbInstance):
user = getUserByEmail(email, dbInstance)
return user != None and 'email' in user.keys()
def activate(activationId, dbInstance):
response = ControllerResponse()
table = dbUtils.getTable('users', dbInstance)
items = []
if table is None:
MentiiLogging.getLogger().error('Unable to get table users in activate')
response.addError('Could not access table. Error', 'The DB did not give us the table')
return response
#Scan for the email associated with this activationId
scanResponse = dbUtils.scanFilter('activationId', activationId, table)
if scanResponse is not None:
#scanResponse is a dictionary that has a list of 'Items'
items = scanResponse['Items']
if not items or 'email' not in items[0].keys():
response.addError('No user with activationid', 'The DB did not return a user with the passed in activationId')
else:
email = items[0]['email']
jsonData = {
'Key': {'email': email},
'UpdateExpression': 'SET active = :a',
'ExpressionAttributeValues': { ':a': 'T' },
'ReturnValues' : 'UPDATED_NEW'
}
#Update using the email we have
res = dbUtils.updateItem(jsonData, table)
response.addToPayload('status', 'Success')
return response
def isUserActive(user):
return user != None and 'active' in user.keys() and user['active'] == 'T'
def getUserByEmail(email, dbInstance):
user = None
table = dbUtils.getTable('users', dbInstance)
if table is None:
MentiiLogging.getLogger().error('Unable to get table users in getUserByEmail')
else:
key = {'Key' : {'email': email}}
result = dbUtils.getItem(key, table)
if result is None:
MentiiLogging.getLogger().error('Unable to get the user with email: ' + email + ' in getUserByEmail ')
elif 'Item' in result.keys():
user = result['Item']
return user
def changeUserRole(jsonData, dbInstance, adminRole=None):
response = ControllerResponse()
#g will be not be available during testing
#and adminRole will need to be passed to the function
if g: # pragma: no cover
adminRole = g.authenticatedUser['userRole']
  #adminRole is confirmed here in case changeUserRole is called from somewhere
#other than app.py changeUserRole()
if adminRole != 'admin':
response.addError('Role Error', 'Only admins can change user roles')
elif 'email' not in jsonData.keys() or 'userRole' not in jsonData.keys():
response.addError('Key Missing Error', 'Email or role missing from json data')
else:
email = jsonData['email']
userRole = jsonData['userRole']
userTable = dbUtils.getTable('users', dbInstance)
if userTable is None:
MentiiLogging.getLogger().error('Unable to get table "users" in changeUserRole')
response.addError('No Access to Data', 'Unable to get data from database')
else:
if userRole != 'student' and userRole != 'teacher' and userRole != 'admin':
MentiiLogging.getLogger().error('Invalid role: ' + userRole + ' specified. Unable to change user role')
        response.addError('Invalid Role Type', 'Invalid role specified')
else:
data = {
'Key': {'email': email},
'UpdateExpression': 'SET userRole = :ur',
'ExpressionAttributeValues': { ':ur': userRole },
'ReturnValues' : 'UPDATED_NEW'
}
result = dbUtils.updateItem(data, userTable)
if result is None:
MentiiLogging.getLogger().error('Unable to update the user with email: ' + email + ' in changeUserRole')
response.addError('Result Update Error', 'Could not update the user role in database')
else:
response.addToPayload('Result:', result)
response.addToPayload('success', 'true')
return response
def getRole(userEmail, dynamoDBInstance):
'''
Returns the role of the user whose email is pased. If we are unable to get
this information from the DB the role None is returned. Calling code must
grant only student permissions in this case.
'''
userRole = None
table = dbUtils.getTable('users', dynamoDBInstance)
if table is None:
MentiiLogging.getLogger().error('Could not get user table in getUserRole')
else:
request = {"Key" : {"email": userEmail}, "ProjectionExpression": "userRole"}
res = dbUtils.getItem(request, table)
if res is None or 'Item' not in res:
MentiiLogging.getLogger().error('Could not get role for user ' + userEmail)
else:
userRole = res['Item']['userRole']
return userRole
def joinClass(jsonData, dynamoDBInstance, email=None, userRole=None):
response = ControllerResponse()
#g will be not be available during testing
#and email will need to be passed to the function
if g: # pragma: no cover
email = g.authenticatedUser['email']
userRole = g.authenticatedUser['userRole']
if 'code' not in jsonData.keys() or not jsonData['code']:
response.addError('Key Missing Error', 'class code missing from data')
elif userRole == 'teacher' or userRole == 'admin':
if class_ctrl.isCodeInTaughtList(jsonData, dynamoDBInstance, email):
response.addError('Role Error', 'Teachers cannot join their taught class as a student')
else:
classCode = jsonData['code']
addDataToClassAndUser(classCode, email, response, dynamoDBInstance)
else:
classCode = jsonData['code']
addDataToClassAndUser(classCode, email, response, dynamoDBInstance)
return response
def addDataToClassAndUser(classCode, email, response, dynamoDBInstance):
updatedClassCodes = addClassCodeToStudent(email, classCode, dynamoDBInstance)
if not updatedClassCodes:
response.addError('joinClass call Failed', 'Unable to update user data')
else:
updatedClass = addStudentToClass(classCode, email, dynamoDBInstance)
if not updatedClass:
response.addError('joinClass call Failed', 'Unable to update class data')
else:
response.addToPayload('title', updatedClass['title'])
response.addToPayload('code', updatedClass['code'])
def leaveClass(jsonData, dynamoDBInstance, email=None):
response = ControllerResponse()
data = None
if g: # pragma: no cover
email = g.authenticatedUser['email']
if 'code' not in jsonData.keys() or not jsonData['code']:
response.addError('Key Missing Error', 'class code missing from data')
else:
classCode = jsonData['code']
data = {
'email': email,
'classCode': classCode
}
return class_ctrl.removeStudent(dynamoDBInstance, data, response=response, userRole=None)
def addClassCodeToStudent(email, classCode, dynamoDBInstance):
userTable = dbUtils.getTable('users', dynamoDBInstance)
if userTable:
codeSet = set([classCode])
addClassToUser = {
'Key': {'email': email},
'UpdateExpression': 'ADD classCodes :i',
'ExpressionAttributeValues': { ':i': codeSet },
'ReturnValues' : 'UPDATED_NEW'
}
res = dbUtils.updateItem(addClassToUser, userTable)
if ( res and
'Attributes' in res and
'classCodes' in res['Attributes'] and
classCode in res['Attributes']['classCodes']
):
return res['Attributes']['classCodes']
return None
def addStudentToClass(classCode, email, dynamoDBInstance):
classTable = dbUtils.getTable('classes', dynamoDBInstance)
if classTable:
emailSet = set([email])
addUserToClass = {
'Key': {'code': classCode},
'UpdateExpression': 'ADD students :i',
'ExpressionAttributeValues': { ':i': emailSet },
'ReturnValues' : 'ALL_NEW'
}
res = dbUtils.updateItem(addUserToClass, classTable)
if ( res and
'Attributes' in res and
'students' in res['Attributes'] and
email in res['Attributes']['students'] and
'title' in res['Attributes']
):
return res['Attributes']
return None
| mit | -6,239,574,435,839,098,000 | 34.567961 | 142 | 0.696806 | false | 3.867511 | false | false | false |
justinmuller/buck | third-party/py/pex/pex/sorter.py | 55 | 1403 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from .package import EggPackage, SourcePackage, WheelPackage
class Sorter(object):
DEFAULT_PACKAGE_PRECEDENCE = (
WheelPackage,
EggPackage,
SourcePackage,
)
@classmethod
def package_type_precedence(cls, package, precedence=DEFAULT_PACKAGE_PRECEDENCE):
for rank, package_type in enumerate(reversed(precedence)):
if isinstance(package, package_type):
return rank
# If we do not recognize the package, it gets lowest precedence
return -1
@classmethod
def package_precedence(cls, package, precedence=DEFAULT_PACKAGE_PRECEDENCE):
return (
package.version, # highest version
cls.package_type_precedence(package, precedence=precedence), # type preference
package.local) # prefer not fetching over the wire
def __init__(self, precedence=None):
self._precedence = precedence or self.DEFAULT_PACKAGE_PRECEDENCE
# return sorted list of (possibly filtered) packages from the list
def sort(self, packages, filter=True):
key = lambda package: self.package_precedence(package, self._precedence)
return [
package for package in sorted(packages, key=key, reverse=True)
if not filter or any(isinstance(package, package_cls) for package_cls in self._precedence)]
| apache-2.0 | 4,735,634,698,640,328,000 | 36.918919 | 99 | 0.722737 | false | 4.251515 | false | false | false |
ngonzalvez/sentry | src/sentry/api/endpoints/organization_member_details.py | 7 | 5209 | from __future__ import absolute_import
from django.db import transaction
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationPermission
)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import (
AuditLogEntryEvent, AuthIdentity, AuthProvider, OrganizationMember,
OrganizationMemberType
)
ERR_NO_AUTH = 'You cannot remove this member with an unauthenticated API request.'
ERR_INSUFFICIENT_ROLE = 'You cannot remove a member who has more access than you.'
ERR_INSUFFICIENT_SCOPE = 'You are missing the member:delete scope.'
ERR_ONLY_OWNER = 'You cannot remove the only remaining owner of the organization.'
ERR_UNINVITABLE = 'You cannot send an invitation to a user who is already a full member.'
class OrganizationMemberSerializer(serializers.Serializer):
reinvite = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
scope_map = {
'GET': ['member:read', 'member:write', 'member:delete'],
'POST': ['member:write', 'member:delete'],
'PUT': ['member:write', 'member:delete'],
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': ['member:read', 'member:write', 'member:delete'],
}
class OrganizationMemberDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
)
else:
queryset = OrganizationMember.objects.filter(
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
def _is_only_owner(self, member):
if member.type != OrganizationMemberType.OWNER:
return False
queryset = OrganizationMember.objects.filter(
organization=member.organization_id,
type=OrganizationMemberType.OWNER,
has_global_access=True,
user__isnull=False,
).exclude(id=member.id)
if queryset.exists():
return False
return True
def put(self, request, organization, member_id):
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
serializer = OrganizationMemberSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(status=400)
has_sso = AuthProvider.objects.filter(
organization=organization,
).exists()
result = serializer.object
# XXX(dcramer): if/when this expands beyond reinvite we need to check
# access level
if result.get('reinvite'):
if om.is_pending:
om.send_invite_email()
elif has_sso and not getattr(om.flags, 'sso:linked'):
om.send_sso_link_email()
else:
# TODO(dcramer): proper error message
return Response({'detail': ERR_UNINVITABLE}, status=400)
return Response(status=204)
def delete(self, request, organization, member_id):
if request.user.is_superuser:
authorizing_access = OrganizationMemberType.OWNER
elif request.user.is_authenticated():
try:
authorizing_access = OrganizationMember.objects.get(
organization=organization,
user=request.user,
has_global_access=True,
).type
except OrganizationMember.DoesNotExist:
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
elif request.access.has_scope('member:delete'):
authorizing_access = OrganizationMemberType.OWNER
else:
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if om.type < authorizing_access:
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
if self._is_only_owner(om):
return Response({'detail': ERR_ONLY_OWNER}, status=403)
audit_data = om.get_audit_log_data()
with transaction.atomic():
AuthIdentity.objects.filter(
user=om.user,
auth_provider__organization=organization,
).delete()
om.delete()
self.create_audit_entry(
request=request,
organization=organization,
target_object=om.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_REMOVE,
data=audit_data,
)
return Response(status=204)
| bsd-3-clause | 7,032,361,661,708,778,000 | 34.195946 | 89 | 0.630639 | false | 4.553322 | false | false | false |
deepmind/android_env | android_env/environment.py | 1 | 4381 | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android environment implementation."""
from typing import Any, Dict
from absl import logging
from android_env.components import coordinator as coordinator_lib
import dm_env
import numpy as np
class AndroidEnv(dm_env.Environment):
"""An RL environment that interacts with Android apps."""
def __init__(self, coordinator: coordinator_lib.Coordinator):
"""Initializes the state of this AndroidEnv object."""
self._coordinator = coordinator
self._latest_action = {}
self._latest_observation = {}
self._latest_extras = {}
self._reset_next_step = True
logging.info('Action spec: %s', self.action_spec())
logging.info('Observation spec: %s', self.observation_spec())
logging.info('Task extras spec: %s', self.task_extras_spec())
def action_spec(self) -> Dict[str, dm_env.specs.Array]:
return self._coordinator.action_spec()
def observation_spec(self) -> Dict[str, dm_env.specs.Array]:
return self._coordinator.observation_spec()
def task_extras_spec(self) -> Dict[str, dm_env.specs.Array]:
return self._coordinator.task_extras_spec()
@property
def raw_action(self):
return self._latest_action
@property
def raw_observation(self):
return self._latest_observation
def android_logs(self) -> Dict[str, Any]:
return self._coordinator.get_logs()
def reset(self) -> dm_env.TimeStep:
"""Resets the environment for a new RL episode."""
logging.info('Resetting AndroidEnv...')
# Reset state of the environment.
self._coordinator.reset_environment_state()
# Execute selected action (None when resetting).
obs, _, extras, _ = self._coordinator.execute_action(action=None)
# Process relevant information.
if obs is not None:
self._latest_observation = obs.copy()
self._latest_extras = extras.copy()
self._latest_action = {}
self._reset_next_step = False
logging.info('Done resetting AndroidEnv.')
logging.info('************* NEW EPISODE *************')
return dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
observation=self._latest_observation,
reward=0.0,
discount=0.0)
def step(self, action: Dict[str, np.ndarray]) -> dm_env.TimeStep:
"""Takes a step in the environment."""
# Check if it's time to reset the episode.
if self._reset_next_step:
return self.reset()
# Execute selected action.
obs, reward, extras, episode_end = self._coordinator.execute_action(action)
# Process relevant information.
if obs is not None:
self._latest_observation = obs.copy()
self._latest_extras = extras.copy()
self._latest_action = action.copy()
self._reset_next_step = episode_end
# Return timestep with reward and observation just computed.
if episode_end:
return dm_env.termination(
observation=self._latest_observation, reward=reward)
else:
return dm_env.transition(
observation=self._latest_observation, reward=reward, discount=0.0)
def task_extras(self, latest_only: bool = True) -> Dict[str, np.ndarray]:
"""Returns latest task extras."""
task_extras = {}
for key, spec in self.task_extras_spec().items():
if key in self._latest_extras:
extra_values = self._latest_extras[key].astype(spec.dtype)
for extra in extra_values:
spec.validate(extra)
task_extras[key] = extra_values[-1] if latest_only else extra_values
return task_extras
def close(self) -> None:
"""Cleans up running processes, threads and local files."""
logging.info('Cleaning up AndroidEnv...')
if hasattr(self, '_coordinator'):
self._coordinator.close()
logging.info('Done cleaning up AndroidEnv.')
def __del__(self) -> None:
self.close()
| apache-2.0 | 2,356,231,852,773,265,000 | 31.93985 | 79 | 0.679297 | false | 3.883865 | true | false | false |
e-koch/Phys-595 | project_code/Machine Learning/hist2d.py | 1 | 2361 | '''
My alterations of hist2d from the triangle package.
'''
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.cm as cm
def hist2d(x, y, *args, **kwargs):
"""
Plot a 2-D histogram of samples.
"""
ax = kwargs.pop("ax", pl.gca())
extent = kwargs.pop("extent", [[x.min(), x.max()], [y.min(), y.max()]])
bins = kwargs.pop("bins", 50)
color = kwargs.pop("color", "k")
linewidths = kwargs.pop("linewidths", None)
plot_datapoints = kwargs.get("plot_datapoints", True)
plot_contours = kwargs.get("plot_contours", False)
cmap = kwargs.get("cmap", 'gray')
cmap = cm.get_cmap(cmap)
cmap._init()
cmap._lut[:-3, :-1] = 0.
cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)
X = np.linspace(extent[0][0], extent[0][1], bins + 1)
Y = np.linspace(extent[1][0], extent[1][1], bins + 1)
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y),
weights=kwargs.get('weights', None))
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"`extent` argument.")
V = 1.0 - np.exp(-0.5 * np.arange(1.5, 2.1, 0.5) ** 2)
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
for i, v0 in enumerate(V):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
X, Y = X[:-1], Y[:-1]
if plot_datapoints:
ax.plot(x, y, "o", color=color, ms=1.5, zorder=-1, alpha=0.2,
rasterized=True)
if plot_contours:
ax.contourf(X1, Y1, H.T, [V[-1], H.max()],
cmap=LinearSegmentedColormap.from_list("cmap",
([1] * 3,
[1] * 3),N=2),
antialiased=False)
if plot_contours:
# ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)
ax.contour(X1, Y1, H.T, V, colors=color, linewidths=linewidths)
ax.set_xlim(extent[0])
ax.set_ylim(extent[1]) | mit | -2,448,166,880,161,317,400 | 31.805556 | 78 | 0.501482 | false | 3.135458 | false | false | false |
EconForge/dolo | dolo/misc/multimethod.py | 1 | 10255 | ### The following originates from https://github.com/coady/multimethod
# Copyright 2020 Aric Coady
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import abc
import collections
import functools
import inspect
import itertools
import types
import typing
from typing import Callable, Iterable, Iterator, Mapping
__version__ = "1.3"
def groupby(func: Callable, values: Iterable) -> dict:
"""Return mapping of key function to values."""
groups = collections.defaultdict(list) # type: dict
for value in values:
groups[func(value)].append(value)
return groups
def get_types(func: Callable) -> tuple:
"""Return evaluated type hints in order."""
if not hasattr(func, "__annotations__"):
return ()
annotations = dict(typing.get_type_hints(func))
annotations.pop("return", None)
params = inspect.signature(func).parameters
return tuple(annotations.pop(name, object) for name in params if annotations)
class DispatchError(TypeError):
pass
class subtype(type):
"""A normalized generic type which checks subscripts."""
def __new__(cls, tp, *args):
if tp is typing.Any:
return object
if isinstance(tp, typing.TypeVar):
if not tp.__constraints__:
return object
tp = typing.Union[tp.__constraints__]
origin = getattr(tp, "__extra__", getattr(tp, "__origin__", tp))
args = tuple(map(cls, getattr(tp, "__args__", None) or args))
if set(args) <= {object} and not (origin is tuple and args):
return origin
bases = (origin,) if type(origin) is type else ()
namespace = {"__origin__": origin, "__args__": args}
return type.__new__(cls, str(tp), bases, namespace)
def __init__(self, tp, *args):
if isinstance(self.__origin__, abc.ABCMeta):
self.__origin__.register(self)
def __getstate__(self):
return self.__origin__, self.__args__
def __eq__(self, other):
return (
isinstance(other, subtype) and self.__getstate__() == other.__getstate__()
)
def __hash__(self):
return hash(self.__getstate__())
def __subclasscheck__(self, subclass):
origin = getattr(
subclass, "__extra__", getattr(subclass, "__origin__", subclass)
)
args = getattr(subclass, "__args__", ())
if origin is typing.Union:
return all(issubclass(cls, self) for cls in args)
if self.__origin__ is typing.Union:
return issubclass(subclass, self.__args__)
return ( # check args first to avoid a recursion error in ABCMeta
len(args) == len(self.__args__)
and issubclass(origin, self.__origin__)
and all(map(issubclass, args, self.__args__))
)
class signature(tuple):
"""A tuple of types that supports partial ordering."""
parents = None # type: set
def __new__(cls, types: Iterable):
return tuple.__new__(cls, map(subtype, types))
def __le__(self, other) -> bool:
return len(self) <= len(other) and all(map(issubclass, other, self))
def __lt__(self, other) -> bool:
return self != other and self <= other
def __sub__(self, other) -> tuple:
"""Return relative distances, assuming self >= other."""
mros = (subclass.mro() for subclass in self)
return tuple(
mro.index(cls if cls in mro else object) for mro, cls in zip(mros, other)
)
class multimethod(dict):
"""A callable directed acyclic graph of methods."""
pending = None # type: set
def __new__(cls, func):
namespace = inspect.currentframe().f_back.f_locals
self = functools.update_wrapper(dict.__new__(cls), func)
self.pending = set()
self.get_type = type # default type checker
return namespace.get(func.__name__, self)
def __init__(self, func: Callable):
try:
self[get_types(func)] = func
except NameError:
self.pending.add(func)
def register(self, *args):
"""Decorator for registering a function.
Optionally call with types to return a decorator for unannotated functions.
"""
if len(args) == 1 and hasattr(args[0], "__annotations__"):
return overload.register(self, *args)
return lambda func: self.__setitem__(args, func) or func
def __get__(self, instance, owner):
return self if instance is None else types.MethodType(self, instance)
def parents(self, types: tuple) -> set:
"""Find immediate parents of potential key."""
parents = {key for key in self if isinstance(key, signature) and key < types}
return parents - {ancestor for parent in parents for ancestor in parent.parents}
def clean(self):
"""Empty the cache."""
for key in list(self):
if not isinstance(key, signature):
super().__delitem__(key)
def __setitem__(self, types: tuple, func: Callable):
self.clean()
types = signature(types)
parents = types.parents = self.parents(types)
for key in self:
if types < key and (not parents or parents & key.parents):
key.parents -= parents
key.parents.add(types)
if any(isinstance(cls, subtype) for cls in types):
self.get_type = get_type # switch to slower generic type checker
super().__setitem__(types, func)
self.__doc__ = self.docstring
def __delitem__(self, types: tuple):
self.clean()
super().__delitem__(types)
for key in self:
if types in key.parents:
key.parents = self.parents(key)
self.__doc__ = self.docstring
def __missing__(self, types: tuple) -> Callable:
"""Find and cache the next applicable method of given types."""
self.evaluate()
if types in self:
return self[types]
groups = groupby(signature(types).__sub__, self.parents(types))
keys = groups[min(groups)] if groups else []
funcs = {self[key] for key in keys}
if len(funcs) == 1:
return self.setdefault(types, *funcs)
msg = f"{self.__name__}: {len(keys)} methods found" # type: ignore
raise DispatchError(msg, types, keys)
def __call__(self, *args, **kwargs):
"""Resolve and dispatch to best method."""
return self[tuple(map(self.get_type, args))](*args, **kwargs)
def evaluate(self):
"""Evaluate any pending forward references.
This can be called explicitly when using forward references,
otherwise cache misses will evaluate.
"""
while self.pending:
func = self.pending.pop()
self[get_types(func)] = func
@property
def docstring(self):
"""a descriptive docstring of all registered functions"""
docs = []
for func in set(self.values()):
try:
sig = inspect.signature(func)
except ValueError:
sig = ""
doc = func.__doc__ or ""
docs.append(f"{func.__name__}{sig}\n {doc}")
return "\n\n".join(docs)
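# --- Added example (illustrative only, not part of the library) ---------------
# Registering two overloads of the same name dispatches on the runtime types of
# the positional arguments; the `concat` name below is made up for this sketch.
#
#   @multimethod
#   def concat(a: list, b: list):
#       return a + b
#
#   @multimethod
#   def concat(a: str, b: str):
#       return a + b
#
#   concat([1], [2])   # -> [1, 2]
#   concat("a", "b")   # -> "ab"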
class multidispatch(multimethod):
"""Provisional wrapper for future compatibility with `functools.singledispatch`."""
get_type = multimethod(type)
get_type.__doc__ = """Return a generic `subtype` which checks subscripts."""
for atomic in (Iterator, str, bytes):
get_type[
atomic,
] = type
@multimethod # type: ignore[no-redef]
def get_type(arg: tuple):
"""Return generic type checking all values."""
return subtype(type(arg), *map(get_type, arg))
@multimethod # type: ignore[no-redef]
def get_type(arg: Mapping):
"""Return generic type checking first item."""
return subtype(type(arg), *map(get_type, next(iter(arg.items()), ())))
@multimethod # type: ignore[no-redef]
def get_type(arg: Iterable):
"""Return generic type checking first value."""
return subtype(type(arg), *map(get_type, itertools.islice(arg, 1)))
def isa(*types) -> Callable:
"""Partially bound `isinstance`."""
return lambda arg: isinstance(arg, types)
class overload(collections.OrderedDict):
"""Ordered functions which dispatch based on their annotated predicates."""
__get__ = multimethod.__get__
def __new__(cls, func):
namespace = inspect.currentframe().f_back.f_locals
self = functools.update_wrapper(super().__new__(cls), func)
return namespace.get(func.__name__, self)
def __init__(self, func: Callable):
self[inspect.signature(func)] = func
def __call__(self, *args, **kwargs):
"""Dispatch to first matching function."""
for sig, func in reversed(self.items()):
arguments = sig.bind(*args, **kwargs).arguments
if all(
predicate(arguments[name])
for name, predicate in func.__annotations__.items()
):
return func(*args, **kwargs)
raise DispatchError("No matching functions found")
def register(self, func: Callable) -> Callable:
"""Decorator for registering a function."""
self.__init__(func) # type: ignore
return self if self.__name__ == func.__name__ else func # type: ignore
class multimeta(type):
"""Convert all callables in namespace to multimethods."""
class __prepare__(dict):
def __init__(*args):
pass
def __setitem__(self, key, value):
if callable(value):
value = getattr(self.get(key), "register", multimethod)(value)
super().__setitem__(key, value)
| bsd-2-clause | 8,543,980,173,115,379,000 | 32.956954 | 88 | 0.59805 | false | 4.175489 | false | false | false |
h-mayorquin/competitive_and_selective_learning | test_scl.py | 1 | 3092 | """
This is the play
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.datasets import make_blobs
from functions import selection_algorithm, scl
plot = True
verbose = False
tracking = False
selection = True
# Generate the data
n_samples = 1500
random_state = 20 # Does not converge
random_state = 41
random_state = 105
n_features = 2
centers = 3
X, y = make_blobs(n_samples, n_features, centers, random_state=random_state)
# Seed the random number generator
np.random.seed(random_state)
# The algorithm
N = 3
m = 1
s = 2 # Number of neurons to change per round
D = math.inf
eta = 1.0 / n_samples
eta = 0.1
neurons = np.random.rand(N, n_features)
D_vector = np.zeros(n_samples)
T = 50
# Initialize neuron to data hash with empty list
neuron_to_data = {}
for neuron in range(N):
neuron_to_data[neuron] = []
follow_neuron_0_x = []
follow_neuron_0_y = []
follow_neuron_1_x = []
follow_neuron_1_y = []
follow_neuron_2_x = []
follow_neuron_2_y = []
total_distortion = []
time = np.arange(T)
s_half_life = 10
s_0 = 2
s_sequence = np.floor(s_0 * np.exp(-time / s_half_life)).astype('int')
for t, s in zip(time, s_sequence):
# Data loop
for x_index, x in enumerate(X):
# Conventional competitive learning
distances = np.linalg.norm(neurons - x, axis=1)
closest_neuron = np.argmin(distances)
# Modify neuron weight
difference = x - neurons[closest_neuron, :]
neurons[closest_neuron, :] += eta * difference
# Store the distance to each
D_vector[x_index] = np.linalg.norm(neurons[closest_neuron, :] - x)
neuron_to_data[closest_neuron].append(x_index)
if tracking:
follow_neuron_0_x.append(neurons[0, 0])
follow_neuron_0_y.append(neurons[0, 1])
follow_neuron_1_x.append(neurons[1, 0])
follow_neuron_1_y.append(neurons[1, 1])
follow_neuron_2_x.append(neurons[2, 0])
follow_neuron_2_y.append(neurons[2, 1])
# Selection
if selection:
neurons = selection_algorithm(neurons, D_vector, neuron_to_data, s)
if verbose:
print('winning neuron', closest_neuron)
print('distances', distances)
if t % 10 == 0:
print('time', t)
total_distortion.append(np.sum(D_vector))
if plot:
# Visualize X
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(211)
ax.plot(X[:, 0], X[:, 1], 'x', markersize=6)
ax.hold(True)
if True:
ax.plot(neurons[0, 0], neurons[0, 1], 'o', markersize=12, label='neuron 1')
ax.plot(neurons[1, 0], neurons[1, 1], 'o', markersize=12, label='neuron 2')
ax.plot(neurons[2, 0], neurons[2, 1], 'o', markersize=12, label='neuron 3')
ax.legend()
if tracking:
ax.plot(follow_neuron_0_x, follow_neuron_0_y, 'o-', markersize=12)
ax.plot(follow_neuron_1_x, follow_neuron_1_y, 'o-', markersize=12)
ax.plot(follow_neuron_2_x, follow_neuron_2_y, 'o-', markersize=12)
ax2 = fig.add_subplot(212)
ax2.plot(time, total_distortion)
plt.show()
| mit | 6,980,486,067,870,916,000 | 24.553719 | 83 | 0.625809 | false | 2.98744 | false | false | false |
lmgichin/formations | python/Introspection.py | 1 | 1247 | # -*- coding: utf8 -*-
class Personne:
"Définition de la classe personne"
# Pas de surcharge
# Pas de polymorphisme direct
### Attributs statiques de classe ###
ctr = 0
### Attributs d'instance ###
# Pas de définition préalable
### Méthode appelée avant la création de l'instance ###
def __new__(self):
pass
### Constructeur ###
def __init__(self, nom, prenom):
Personne.ctr += 1
self.nom = nom
self.prenom = prenom
### Destructeur ###
def __del__(self):
pass
### Méthode sollicitée par str ###
def __str__(self):
return self.prenom + " " + self.nom
### Méthode statique ###
def get_ctr():
return Personne.ctr
### getter, setter ###
@property
def name(self):
return self.nom
@name.setter
def name(self, name):
self.nom = name
get_ctr = staticmethod(get_ctr)
pers = Personne("xxx", "Gichin")
pers.name = "Funakoshi"
print "Nom = ", pers.name
### Lister toutes les attributs et méthodes de la classe ###
print "Tous les objets : " + str(dir(pers))
print "Toutes les méthodes : " + str([methode for methode in dir(pers) if callable(getattr(pers, methode))])
| gpl-2.0 | 7,878,477,071,107,503,000 | 20.684211 | 108 | 0.579288 | false | 3.185567 | false | false | false |
HeqetLabs/dynaconfig | dynaconfig/endpoints.py | 1 | 4418 | import datetime
import rethinkdb as r
from flask import request
from flask.ext.restful import Resource, abort
from dynaconfig import db
from time import time
def config_id(user_id, config_name):
return "{}-{}".format(user_id, config_name)
class Config(Resource):
def get(self, user_id, config_name):
current_config = list(r.table("config").get_all(config_id(user_id, config_name), index="name").run(db.conn))
if current_config:
return current_config[0]
else:
return abort(404, message="Could not find config with name='{}' for user id={}".format(config_name, user_id))
def post(self, user_id, config_name):
values = request.json
current_config = list(r.table("config").get_all(config_id(user_id, config_name), index="name").run(db.conn))
if current_config:
current_config = current_config[0]
old_audit = current_config["values"]
_id = current_config["id"]
old_audit = current_config["audit_trail"]
old_values = current_config["values"]
current_version = current_config["highest_version"] + 1
new_audit = self._create_audit(old_values, values, current_version)
if new_audit["changes"]:
return r.table("config").get(_id).update({
"version": r.row["highest_version"] + 1,
"last_version": r.row["version"],
"highest_version": r.row["highest_version"] + 1,
"values": r.literal(values),
"audit_trail": r.row["audit_trail"].default([]).append(new_audit)
}).run(db.conn)
else:
return abort(302, message="Config did not change")
else:
return r.table("config").insert({
"name": "{}-{}".format(user_id, config_name),
"version": 0,
"highest_version": 0,
"last_version": 0,
"values": values,
"audit_trail": [self._create_audit({}, values, 0)]
}).run(db.conn)
def _create_audit(self, old_values, new_values, version):
audit_values = []
for k in old_values:
if k in new_values:
if old_values[k] != new_values[k]:
audit_values.append({"key": k, "action": "updated", "value": new_values[k]})
else:
audit_values.append({"key": k, "action": "removed", "value": old_values[k]})
new_keys = set(new_values.keys()).difference(set(old_values.keys()))
for k in new_keys:
audit_values.append({
"key": k,
"action": "added",
"value": new_values[k]
})
return {"created_at": int(time() * 1000), "changes": audit_values, "version": version}
class RevertConfig(Resource):
def put(self, user_id, config_name, version):
current_config = list(r.table("config").get_all(config_id(user_id, config_name), index="name").run(db.conn))
if current_config:
      current_config = current_config[0]
      _id = current_config["id"]
      if 0 <= version <= current_config["highest_version"]:
        current_version = current_config["version"]
audit_trail = current_config["audit_trail"]
values = self._revert_config(current_config["values"], audit_trail, version, current_version)
return r.table("config").get(_id).update({
"version": version,
"last_version": r.row["version"],
"values": r.literal(values)
}).run(db.conn)
else:
return abort(404, message="Version={} for config with name='{}' for user id={} could not be found".format(version, config_name, user_id))
else:
return abort(404, message="Could not find config with name='{}' for user id={}".format(config_name, user_id))
def _revert_config(self, config, audits, current_version, expected_version):
assert(not current_version == expected_version)
if current_version > expected_version:
changes = reversed([a for audit_map in map(lambda audit: audit["changes"] if audit["version"] >= expected_version else [], audits) for a in audit_map])
elif expected_version > current_version:
      changes = [a for audit_map in map(lambda audit: audit["changes"] if audit["version"] <= expected_version else [], audits) for a in audit_map]
for change in changes:
action = change["action"]
key = change["key"]
value = change["value"]
if action in ["updated", "removed"]:
config[key] = value
elif action == "added":
if expected_version == 0:
config[key] = value
else:
del config[key]
return config
| apache-2.0 | 5,752,779,282,069,932,000 | 35.512397 | 157 | 0.616569 | false | 3.59187 | true | false | false |
nomaro/SickBeard_Backup | sickbeard/providers/ethor.py | 19 | 3033 | # Author: Julien Goret <[email protected]>
# URL: https://github.com/sarakha63/Sick-Beard
#
# This file is based upon tvtorrents.py.
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import generic
from sickbeard import helpers, logger, exceptions, tvcache
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from sickbeard.name_parser.parser import NameParser, InvalidNameException
class EthorProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Ethor")
self.supportsBacklog = False
self.cache = EthorCache(self)
self.url = 'http://ethor.net/'
def isEnabled(self):
return sickbeard.ETHOR
def imageName(self):
return 'ethor.png'
class EthorCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll every 15 minutes
self.minTime = 15
def _getRSSData(self):
if not sickbeard.ETHOR_KEY:
raise exceptions.AuthException("Ethor requires an API key to work correctly")
url = 'http://ethor.net/rss.php?feed=dl&cat=45,43,7&rsskey=' + sickbeard.ETHOR_KEY
logger.log(u"Ethor cache update URL: " + url, logger.DEBUG)
data = self.provider.getURL(url)
return data
def _parseItem(self, item):
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
ltvdb_api_parms['search_all_languages'] = True
(title, url) = self.provider._get_title_and_url(item)
if not title or not url:
logger.log(u"The XML returned from the Ethor RSS feed is incomplete, this result is unusable", logger.ERROR)
return
try:
myParser = NameParser()
parse_result = myParser.parse(title)
except InvalidNameException:
logger.log(u"Unable to parse the filename "+title+" into a valid episode", logger.DEBUG)
return
try:
t = tvdb_api.Tvdb(**ltvdb_api_parms)
showObj = t[parse_result.series_name]
except tvdb_exceptions.tvdb_error:
logger.log(u"TVDB timed out, unable to update episodes from TVDB", logger.ERROR)
return
logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)
self._addCacheEntry(name=title, url=url, tvdb_id=showObj['id'])
provider = EthorProvider()
| gpl-3.0 | 7,512,435,833,235,280,000 | 31.612903 | 120 | 0.665348 | false | 3.694275 | false | false | false |
sebbcn/django-secure-storage | models.py | 1 | 2791 | import pickle
from django.db import models
from .encryption import get_cipher_and_iv, padding
from django.utils.timezone import now
class EncryptedUploadedFileMetaData(models.Model):
''' Meta data for saved files. '''
# File uuid
file_id = models.CharField(max_length=50, primary_key=True)
encrypted_name = models.CharField(max_length=200)
# salt for AES cipher
iv = models.CharField(max_length=50)
# File Access Expiration date
expire_date = models.DateTimeField(auto_now=False, null=True, blank=True)
# File access one time flag
one_time = models.BooleanField(default=False)
# Clear file size
size = models.IntegerField(default=0, null=True, blank=True)
@classmethod
def save_(cls, file_):
''' writes metadata for a given file '''
cipher = get_cipher_and_iv(file_.passphrase, file_.iv)[0]
metadata = cls()
metadata.file_id = file_.name
for attr in ('size', 'one_time', 'iv', 'expire_date'):
setattr(metadata, attr, getattr(file_, attr, None))
# Encrypts plain filename and content-type together
encrypted_name = cipher.encrypt(
padding(file_.clear_filename + '|' + file_.content_type))
metadata.encrypted_name = pickle.dumps(encrypted_name)
metadata.iv = pickle.dumps(metadata.iv)
metadata.save()
return metadata
@classmethod
def update(cls, file_, **kwargs):
''' Updates metadata for a given file '''
from .storage import InexistentFile
try:
metadata = cls.objects.get(
file_id=file_.name)
except cls.DoesNotExist:
raise InexistentFile
for arg, val in kwargs.items():
setattr(metadata, arg, val)
metadata.save()
@classmethod
def load(cls, file_):
''' Load metadata for a given file. '''
from .storage import InexistentFile, ExpiredFile
try:
metadata = cls.objects.get(
file_id=file_.name)
except cls.DoesNotExist:
raise InexistentFile
for attr in ('size', 'one_time', 'iv', 'expire_date'):
setattr(file_, attr, getattr(metadata, attr, None))
file_.iv = pickle.loads(file_.iv)
cipher = get_cipher_and_iv(file_.passphrase, file_.iv)[0]
encrypted_name = pickle.loads(metadata.encrypted_name)
file_.clear_filename, file_.content_type = \
cipher.decrypt(encrypted_name).split('|')
# File access has expired
if file_.expire_date and file_.expire_date < now():
metadata.delete()
raise ExpiredFile('This file has expired')
# File is accessed only once
if file_.one_time:
metadata.delete()
| gpl-2.0 | 8,223,567,217,211,155,000 | 31.453488 | 77 | 0.612325 | false | 4.134815 | false | false | false |
RaphaelArkadyMeyer/LiveCoding | Server Side/build.py | 1 | 1305 |
import os.path as path
from sys import argv
from json import load
from os import system
from subprocess import Popen
def make_frankenstein(question_names, file_name):
global server_directory
global client_directory
def read_json(file_name):
with open(file_name) as input_file:
return load(input_file)
def run_tests(tests, config):
file_dict = {}
for f in config:
file_name = f['file']
for question_name in f['questions']:
file_dict[question_name] = file_name
for test in tests:
questions = test['questions']
stdin = test['input']
args = test['args']
files_made = []
# TODO possibly redundant
for question in questions:
file_name = file_dict[question]
if file_name not in files_made:
make_frankenstein(questions, file_name)
files_made.append(file_name)
#system(config['compile'])
#(stdout, stderr) = Popen(args).communicate(stdin)
def main():
config = read_json(argv[1])
tests = read_json(argv[2])
global server_directory
global client_directory
server_directory = read_json(argv[3])
client_directory = read_json(argv[4])
run_tests(tests, config)
if __name__ == '__main__':
main()
| mit | 6,664,806,969,046,097,000 | 23.622642 | 58 | 0.614559 | false | 3.782609 | true | false | false |
jvanasco/facebook_utils | src/facebook_utils/api_versions.py | 1 | 1661 | # stdlib
import datetime
# ==============================================================================
# last checked 2021/03/25
# https://developers.facebook.com/docs/graph-api/changelog/versions
# Version: Release Date, Expiration Date
_API_VERSIONS = {
"10.0": ["Feb 23, 2021", None],
"9.0": ["Nov 10, 2020", "Feb 23, 2023"],
"8.0": ["Aug 4, 2020", "Nov 1, 2022"],
"7.0": ["May 5, 2020", "Aug 4, 2022"],
"6.0": ["Feb 3, 2020", "May 5, 2022"],
"5.0": ["Oct 29, 2019", "Feb 3, 2022"],
"4.0": ["Jul 29, 2019", "Nov 2, 2021"],
"3.3": ["Apr 30, 2019", "Aug 3, 2021"],
"3.2": ["Oct 23, 2018", "May 4, 2021"],
"3.1": ["Jul 26, 2018", "Oct 27, 2020"],
"3.0": ["May 1, 2018", "Jul 28, 2020"],
"2.12": ["Jan 30, 2018", "May 5, 2020"],
"2.11": ["Nov 7, 2017", "Jan 28, 2020"],
"2.10": ["Jul 18, 2017", "Nov 7, 2019"],
"2.9": ["Apr 18, 2017", "Jul 22, 2019"],
"2.8": ["Oct 5, 2016", "Apr 18, 2019"],
"2.7": ["Jul 13, 2016", "Oct 5, 2018"],
"2.6": ["Apr 12, 2016", "Jul 13, 2018"],
"2.5": ["Oct 7, 2015", "Apr 12, 2018"],
"2.4": ["Jul 8, 2015", "Oct 9, 2017"],
"2.3": ["Mar 25, 2015", "Jul 10, 2017"],
"2.2": ["Oct 30, 2014", "Mar 27, 2017"],
"2.1": ["Aug 7, 2014", "Oct 31, 2016"],
"2.0": ["Apr 30, 2014", "Aug 8, 2016"],
"1.0": ["Apr 21, 2010", "Apr 30, 2015"],
}
# >>> datetime.datetime.strptime("Apr 1, 2010", "%b %d, %Y")
_format = "%b %d, %Y"
API_VERSIONS = {}
for (_v, _ks) in _API_VERSIONS.items():
API_VERSIONS[_v] = [
datetime.datetime.strptime(_ks[0], _format),
datetime.datetime.strptime(_ks[1], _format) if _ks[1] else None,
]
| bsd-3-clause | -4,182,697,686,540,373,000 | 35.911111 | 80 | 0.472607 | false | 2.583204 | false | false | false |
psiwczak/openstack | nova/compute/task_states.py | 11 | 1841 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible task states for instances.
Compute instance task states represent what is happening to the instance at the
current moment. These tasks can be generic, such as 'spawning', or specific,
such as 'block_device_mapping'. These task states allow for a better view into
what an instance is doing and should be displayed to users/administrators as
necessary.
"""
SCHEDULING = 'scheduling'
BLOCK_DEVICE_MAPPING = 'block_device_mapping'
NETWORKING = 'networking'
SPAWNING = 'spawning'
IMAGE_SNAPSHOT = 'image_snapshot'
IMAGE_BACKUP = 'image_backup'
UPDATING_PASSWORD = 'updating_password'
RESIZE_PREP = 'resize_prep'
RESIZE_MIGRATING = 'resize_migrating'
RESIZE_MIGRATED = 'resize_migrated'
RESIZE_FINISH = 'resize_finish'
RESIZE_REVERTING = 'resize_reverting'
RESIZE_CONFIRMING = 'resize_confirming'
RESIZE_VERIFY = 'resize_verify'
REBOOTING = 'rebooting'
REBOOTING_HARD = 'rebooting_hard'
PAUSING = 'pausing'
UNPAUSING = 'unpausing'
SUSPENDING = 'suspending'
RESUMING = 'resuming'
POWERING_OFF = 'powering-off'
POWERING_ON = 'powering-on'
RESCUING = 'rescuing'
UNRESCUING = 'unrescuing'
DELETING = 'deleting'
STOPPING = 'stopping'
STARTING = 'starting'
| apache-2.0 | 6,123,617,326,984,806,000 | 29.683333 | 79 | 0.750136 | false | 3.311151 | false | false | false |
Chiel92/graph-problems | lboolw_heuristic.py | 1 | 3899 | from bitset import iterate, size, subtract, contains, first
from components import components
from utils import Infinity
def get_neighborhood(N, subset):
result = 0
for v in iterate(subset):
result |= N[v]
return result
def get_neighborhood_2(N, subset):
result = get_neighborhood(N, subset)
for v in iterate(result):
result |= N[v]
return result
def increment_un(G, X, UN_X, v):
"""Compute UN of X|v, based on the UN of X"""
U = set()
for S in UN_X:
U.add(subtract(S, v))
U.add(subtract(S, v) | (G.neighborhoods[v] & (G.vertices - (X | v))))
return U
def check_decomposition(G, decomposition):
un = {0}
lboolw = 1
left = 0
right = G.vertices
for v in decomposition:
        un = increment_un(G, left, un, v)
lboolw = max(lboolw, len(un))
left = left | v
right = subtract(right, v)
return lboolw
def incremental_un_heuristic(G):
lboolw_components = []
decomposition_components = []
for component in components(G):
best_lboolw = Infinity
best_decomposition = None
for i, start in enumerate([first(component)]):
#for i, start in enumerate(iterate(component)):
print('{}th starting vertex'.format(i))
right = subtract(component, start)
left = start
un_left = increment_un(G, 0, {0}, start)
booldim_left = 1
decomposition = [start]
lboolw = len(un_left)
for _ in range(size(component) - 1):
best_vertex, best_un, _ = greedy_step(G, left, right, un_left,
booldim_left, {}, Infinity)
booldim_left = len(best_un)
lboolw = max(lboolw, booldim_left)
un_left = best_un
decomposition.append(best_vertex)
right = subtract(right, best_vertex)
left = left | best_vertex
if lboolw < best_lboolw:
best_lboolw = lboolw
best_decomposition = decomposition
lboolw_components.append(best_lboolw)
decomposition_components.append(best_decomposition)
total_lboolw = max(lboolw_components)
total_decomposition = [v for part in decomposition_components for v in part]
return total_lboolw, total_decomposition
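# Added usage note: given a graph object G exposing `vertices` and
# `neighborhoods` as bitsets (project-specific, not shown here), a typical call
# would be:
#   lboolw, order = incremental_un_heuristic(G)
#   lboolw_check = check_decomposition(G, order)  # recompute the value from the ordering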
def greedy_step(G, left, right, un_left, booldim_left, un_table, bound):
best_vertex = None
best_booldim = Infinity
best_un = None
if size(right) == 1:
return right, {0}, 1
assert size(right) > 1
candidates = get_neighborhood_2(G.neighborhoods, left) & right
# Trivial cases are slow
for v in iterate(candidates):
if trivial_case(G.neighborhoods, left, right, v):
new_un = increment_un(G, left, un_left, v)
new_booldim = len(new_un)
return v, new_un, new_booldim
for v in iterate(candidates):
if left | v not in un_table:
un_table[left | v] = increment_un(G, left, un_left, v)
new_un = un_table[left | v]
new_booldim = len(new_un)
# Apply pruning
if new_booldim >= bound:
# print('pruning')
continue
if new_booldim < best_booldim:
best_vertex = v
best_booldim = new_booldim
best_un = new_un
# If nothing found
if best_vertex == None:
best_un = increment_un(G, left, un_left, v)
best_booldim = len(best_un)
best_vertex = v
assert best_vertex != None
return best_vertex, best_un, best_booldim
def trivial_case(N, left, right, v):
# No neighbors
if contains(left, N[v]):
return True
# Twins
for u in iterate(left):
if N[v] & right == subtract(N[u], v) & right:
return True
return False
| mit | 2,324,522,540,173,443,000 | 26.652482 | 81 | 0.565017 | false | 3.623606 | false | false | false |
s1341/pyg13 | pyg13.py | 1 | 5972 | #!/usr/bin/env python
import usb.core
import usb.util
import os
import select
import commands
G13_VENDOR_ID = 0x046d
G13_PRODUCT_ID = 0xc21c
G13_KEY_ENDPOINT = 1
G13_LCD_ENDPOINT = 2
G13_REPORT_SIZE = 8
G13_LCD_BUFFER_SIZE = 0x3c0
G13_NUM_KEYS = 40
G13_NUM_MODES = 4
G13_KEYS = (["G%d" % x for x in range(1, 23)] +
["UNDEF1", "LIGHT_STATE", "BD"] +
["L%d" % x for x in range(1, 5)] +
["M%d" % x for x in range(1, 4)] +
["MR", "LEFT", "DOWN", "TOP", "UNDEF3", "LIGHT", "LIGHT2", "MISC_TOGGLE"])
# G13_KEYS = ["G13_KEY_%s" % x for x in G13_KEYS]
LIBUSB_REQUEST_TYPE_STANDARD = (0x00 << 5)
LIBUSB_REQUEST_TYPE_CLASS = (0x01 << 5),
LIBUSB_REQUEST_TYPE_VENDOR = (0x02 << 5)
LIBUSB_REQUEST_TYPE_RESERVED = (0x03 << 5)
LIBUSB_RECIPIENT_DEVICE = 0x00
LIBUSB_RECIPIENT_INTERFACE = 0x01,
LIBUSB_RECIPIENT_ENDPOINT = 0x02
LIBUSB_RECIPIENT_OTHER = 0x03
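# Added note: the control transfers below combine these constants as
# LIBUSB_REQUEST_TYPE_CLASS | LIBUSB_RECIPIENT_INTERFACE (0x20 | 0x01 = 0x21);
# the constants defined with a trailing comma are one-element tuples, which is
# why they are indexed with [0] before being OR-ed together.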
class G13Device(object):
def __init__(self, device):
self.device = device
self.device.set_configuration()
# TODO: do we need to manually claim the interface?
self.unique_id = "%d_%d" % (self.device.bus, self.device.address)
self.key_maps = [{}] * G13_NUM_MODES
self.key_states = {}
self.mode = 0
self.init_lcd()
self.set_mode_leds(0)
self.set_key_color(0, 0, 0)
# TODO: self.write_lcd(g13_logo)
# TODO: self.uinput = self.create_uinput()
self.create_command_fifo()
def init_lcd(self):
self.device.ctrl_transfer(0, 9, 1, 0, None, 1000)
def set_mode_leds(self, leds):
data = [0x05, leds, 0x00, 0x00, 0x00]
self.device.ctrl_transfer(LIBUSB_REQUEST_TYPE_CLASS[0] |
LIBUSB_RECIPIENT_INTERFACE[0],
9, 0x305, 0, data, 1000)
def set_mode(self, mode):
self.set_mode_leds(mode)
# TODO: implement proper mode handling
def set_key_color(self, red, green, blue):
data = [0x05, red, green, blue, 0x00]
self.device.ctrl_transfer(LIBUSB_REQUEST_TYPE_CLASS[0] |
LIBUSB_RECIPIENT_INTERFACE[0],
9, 0x307, 0, data, 1000)
def create_command_fifo(self):
self.command_fifo_name = "/tmp/g13_cmd_%s" % self.unique_id
if os.path.exists(self.command_fifo_name):
os.remove(self.command_fifo_name)
os.mkfifo(self.command_fifo_name, 0666)
self.command_fifo = os.open(self.command_fifo_name, os.O_RDWR | os.O_NONBLOCK)
def handle_commands(self):
""" Handle commands sent to the command fifo. """
ready = select.select([self.command_fifo], [], [], 0)
if not len(ready[0]):
return False
data = os.read(self.command_fifo, 1000)
print "< %s" % data
lines = data.splitlines()
for line in lines:
command = commands.Command.parse_command(line)
if command:
command.execute(self)
def get_key_state(self, key):
if key not in self.key_states:
return False
return self.key_states[key]
def set_key_state(self, key, state):
self.key_states[key] = state
def get_key_action(self, key):
return self.key_maps[self.mode].get(key, None)
def bind_key(self, key, action):
if key not in G13_KEYS:
raise Exception("The specified key isn't a known G13 key")
self.key_maps[self.mode][key] = action
self.key_states[key] = False
def handle_keys(self):
report = []
try:
report = self.device.read(usb.util.ENDPOINT_IN | G13_KEY_ENDPOINT,
G13_REPORT_SIZE)
except usb.core.USBError, e:
if not str(e).startswith("[Errno 60]"):
raise
if len(report):
for g13_key_index, g13_key_name in enumerate(G13_KEYS):
actual_byte = report[3 + (g13_key_index / 8)]
mask = 1 << (g13_key_index % 8)
is_pressed = (actual_byte & mask) == 0
# if the key has changed state, we're going to want to perform the action
current_state = self.get_key_state(g13_key_name)
# print ["%02x" % x for x in report]
if current_state != is_pressed:
print "key: %s %s -> state %s %s" % (g13_key_name, current_state,
actual_byte & mask, is_pressed)
self.set_key_state(g13_key_name, is_pressed)
if not current_state:
action = self.get_key_action(g13_key_name)
if action:
action.perform(self, is_pressed)
return True
def cleanup(self):
# TODO: destroy the device cleanly?
os.close(self.command_fifo)
os.remove(self.command_fifo_name)
def parse_args():
import argparse
parser = argparse.ArgumentParser(description="user-mode g13 driver")
# parser.add_argument("--verbose", "-v", action=store_const, const=bool,
# default=False, "be verbose")
args = parser.parse_args()
return args
def find_devices():
g13s = []
devices = usb.core.find(idVendor=G13_VENDOR_ID, idProduct=G13_PRODUCT_ID,
find_all=True)
print devices
for device in devices:
g13s.append(G13Device(device))
return g13s
def main():
# args = parse_args()
g13s = find_devices()
print g13s
running = True
while running:
try:
for g13 in g13s:
g13.handle_commands()
status = g13.handle_keys()
if not status:
running = False
except KeyboardInterrupt:
running = False
for g13 in g13s:
g13.cleanup()
if __name__ == '__main__':
main()
| apache-2.0 | 7,844,132,187,072,760,000 | 30.597884 | 89 | 0.548895 | false | 3.395111 | false | false | false |
mbdevpl/maildaemon | maildaemon/message.py | 1 | 12738 | """Handling e-mail messages."""
import datetime
import email.header
import email.message
import logging
import typing as t
import dateutil.parser
from .connection import Connection
_LOG = logging.getLogger(__name__)
def recode_header(raw_data: t.Union[bytes, str]) -> str:
"""Normalize the header value."""
decoded_data = email.header.decode_header(raw_data)
try:
return email.header.make_header(decoded_data)
except UnicodeDecodeError as err:
try:
return email.header.make_header([(decoded_data[0][0], 'utf-8')])
except:
_LOG.exception('both "%s" and "utf-8" fail to decode the header', decoded_data[0][1])
raise ValueError(f'after decoding {raw_data!r}, obtained {decoded_data!r}'
' which cannot be re-made into a header') from err
def is_name_and_address(text: str) -> bool:
return '<' in text and '>' in text
def split_name_and_address(text) -> tuple:
if is_name_and_address(text):
begin = text.rfind('<')
end = text.rfind('>')
assert begin < end
return text[begin + 1:end], text[:begin]
return text, None
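# Added examples (not in the original source):
#   split_name_and_address('John Doe <[email protected]>')  -> ('[email protected]', 'John Doe ')
#   split_name_and_address('[email protected]')             -> ('[email protected]', None)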
def recode_timezone_info(dt: datetime.datetime):
name = dt.tzname()
dst = dt.dst()
dst = (' ' + dst) if dst != datetime.timedelta() else ''
if name == 'UTC':
return f'{name}{dst}'
offset = dt.utcoffset()
offset = ('+' if offset >= datetime.timedelta() else '') + str(offset.total_seconds() / 3600)
if name is None or not name:
return f'UTC{offset}{dst}'
return f'{name} (UTC{offset}{dst})'
class Message:
"""An e-mail message."""
def __init__(self, msg: email.message.EmailMessage = None, server: Connection = None,
folder: str = None, msg_id: int = None):
assert folder is None or isinstance(folder, str), type(folder)
assert msg_id is None or isinstance(msg_id, int), type(msg_id)
self._email_message = msg # type: email.message.EmailMessage
self._origin_server = server # type: Connection
self._origin_folder = folder # type: str
self._origin_id = msg_id # type: int
self.from_address = None # type: str
self.from_name = None # type: str
self.reply_to_address = None # type: str
self.reply_to_name = None # type: str
self.to_address = None # type: str
self.to_name = None # type: str
self.subject = None # type: str
self.datetime = None # type: datetime.datetime
self.timezone = None
self.local_date = None
self.local_time = None
self.received = []
self.return_path = None
self.envelope_to = None
self.message_id = None
self.content_type = None
self.other_headers = []
self.flags = set() # type: t.Set[str]
self.contents = []
self.attachments = []
if msg is not None:
self._init_headers_from_email_message(msg)
self._init_contents_from_email_message(msg)
@property
def date(self) -> datetime.date:
if self.datetime is None:
return None
return self.datetime.date()
@property
def time(self) -> datetime.time:
if self.datetime is None:
return None
return self.datetime.time()
@property
def is_read(self) -> bool:
return 'Seen' in self.flags
@property
def is_unread(self) -> bool:
        return not self.is_read
@property
def is_answered(self) -> bool:
return 'Answered' in self.flags
@property
def is_flagged(self) -> bool:
return 'Flagged' in self.flags
@property
def is_deleted(self) -> bool:
return 'Deleted' in self.flags
def _init_headers_from_email_message(self, msg: email.message.EmailMessage) -> None:
for key, value in msg.items():
self._init_header_from_keyvalue(key, value)
def _init_header_from_keyvalue(self, key: str, value: str) -> None:
if key == 'From':
self.from_address, self.from_name = split_name_and_address(str(recode_header(value)))
elif key == 'Reply-To':
self.reply_to_address, self.reply_to_name = split_name_and_address(
str(recode_header(value)))
elif key == 'To':
self.to_address, self.to_name = split_name_and_address(str(recode_header(value)))
elif key == 'Subject':
self.subject = str(recode_header(value))
elif key == 'Date':
self._init_datetime_from_header_value(value)
elif key == 'Received':
self.received.append(value)
elif key == 'Return-Path':
self.return_path = value
elif key == 'Envelope-To':
self.envelope_to = value
elif key == 'Message-Id':
self.message_id = value
elif key == 'Content-Type':
self.content_type = value
else:
self.other_headers.append((key, value))
def _init_datetime_from_header_value(self, value: str):
self.datetime = None
try:
self.datetime = dateutil.parser.parse(value)
except ValueError:
try:
self.datetime = dateutil.parser.parse(value, fuzzy=True)
_LOG.debug(
'dateutil failed to parse string "%s" into a date/time,'
' using fuzzy=True results in: %s', value, self.datetime, exc_info=1)
except ValueError:
_LOG.debug(
'dateutil failed to parse string "%s" into a date/time,'
' even using fuzzy=True', value, exc_info=1)
if self.datetime is not None:
self.timezone = recode_timezone_info(self.datetime)
def _init_contents_from_email_message(self, msg: email.message.EmailMessage) -> None:
if not msg.get_payload():
return
if msg.get_content_maintype() != 'multipart':
self._init_contents_part(msg)
return
content_type = msg.get_content_type()
parts = msg.get_payload()
if isinstance(parts, str):
_LOG.error('one of %i parts in a message is %s, but it has no subparts',
len(parts), content_type)
assert not parts, parts
return
assert isinstance(parts, list), type(parts)
assert parts
if content_type == 'multipart/alternative':
if len(parts) > 1:
_LOG.warning('taking last alternative of %i available in part type %s'
' - ignoring others', len(parts), content_type)
self._init_contents_from_email_message(parts[-1])
elif content_type == 'multipart/related':
if len(parts) > 1:
_LOG.warning('taking only first part of %i available in part type %s'
' - ignoring related parts', len(parts), content_type)
self._init_contents_from_email_message(parts[0])
elif content_type == 'multipart/mixed':
for part in parts:
self._init_contents_from_email_message(part)
else:
raise NotImplementedError(f'handling of "{content_type}" not implemented')
def _init_contents_part(self, part: email.message.Message):
content_type = part.get_content_type()
if content_type not in {'text/plain', 'text/html'}:
_LOG.info('treating message part with type %s as attachment', content_type)
self.attachments.append(part)
return
charset = part.get_content_charset()
if charset:
text = part.get_payload(decode=True)
try:
text = text.decode(charset)
except UnicodeDecodeError:
_LOG.exception('failed to decode %i-character text using encoding "%s"',
len(text), charset)
else:
text = part.get_payload()
try:
if isinstance(text, bytes):
text = text.decode('utf-8')
except UnicodeDecodeError:
_LOG.exception('failed to decode %i-character text using encoding "%s"',
len(text), 'utf-8')
if not isinstance(text, str):
_LOG.error('no content charset in a message %s in part %s -- attachment?',
self.str_headers_compact(), part.as_bytes()[:128])
self.attachments.append(part)
text = None
if not text:
return
self.contents.append(text)
def move_to(self, server: Connection, folder_name: str) -> None:
"""Move message to a specific folder on a specific server."""
assert isinstance(folder_name, str), type(folder_name)
if server is not self._origin_server:
from .imap_connection import IMAPConnection
assert isinstance(self._origin_server, IMAPConnection), type(self._origin_server)
assert isinstance(server, IMAPConnection), type(server)
parts = self._origin_server.retrieve_message_parts(
self._origin_id, ['UID', 'ENVELOPE', 'FLAGS', 'INTERNALDATE', 'BODY.PEEK[]'],
self._origin_folder)
_LOG.warning('moving %s between servers: from %s "%s" to %s "%s"',
self, self._origin_server, self._origin_folder, server, folder_name)
server.add_message(parts, folder_name)
self._origin_server.delete_message(self._origin_id, self._origin_folder)
return
if folder_name == self._origin_folder:
_LOG.debug('move_to() destination same as origin, nothing to do')
return
from .imap_connection import IMAPConnection
assert isinstance(self._origin_server, IMAPConnection), type(self._origin_server)
_LOG.warning('moving %s within same server %s: from "%s" to "%s"',
self, self._origin_server, self._origin_folder, folder_name)
self._origin_server.move_message(self._origin_id, folder_name, self._origin_folder)
def copy_to(self, server: Connection, folder: str) -> None:
raise NotImplementedError()
def send_via(self, server: Connection) -> None:
server.send_message(self._email_message)
def str_oneline(self):
return (f'{type(self).__name__}(From:{self.from_name}<{self.from_address}>,'
f'To:{self.to_name}<{self.to_address}>,Subject:{self.subject},'
f'DateAndTime:{self.datetime})')
def str_headers(self):
return '\n'.join([
f'From: {self.from_address}',
f' {self.from_name}',
f'Reply-To: {self.reply_to_address}',
f' {self.reply_to_name}',
f'To: {self.to_address}',
f' {self.to_name}',
f'Subject: {self.subject}',
f'Date: {self.date}',
f'Time: {self.time}',
f'Timezone: {self.timezone}',
f'Locally: {self.local_date}',
f' {self.local_time}',
# '',
# ' Received: {}'.format(self.received),
# ' Return-Path: {}'.format(self.return_path),
# ' Envelope-To: {}'.format(self.envelope_to),
# ' Message-Id: {}'.format(self.message_id),
# ' Content-Type: {}'.format(self.content_type),
# 'Other headers:',
# '\n'.join([' {}: {}'.format(k, v) for k, v in self.other_headers]),
])
def str_headers_compact(self):
return '\n'.join([
f'From: {self.from_address} {self.from_name}',
f'Reply-To: {self.reply_to_address} {self.reply_to_name}',
f'To: {self.to_address} {self.to_name}',
f'Subject: {self.subject}',
f'Datetime: {self.date} {self.time} {self.timezone}'
])
def str_quote(self):
raise NotImplementedError()
def str_forward(self):
raise NotImplementedError()
def str_complete(self):
return '\n'.join([
self.str_headers(),
'',
f'id: {self._origin_id}',
f'flags: {self.flags}',
'',
'contents{}:'.format(f' (multipart, {len(self.contents)} parts)'
if len(self.contents) > 1 else ''),
80*'=',
(80*'=' + '\n').join(self.contents),
80*'=',
])
def __str__(self):
return self.str_oneline()
def __repr__(self):
return self.str_headers_compact()
| apache-2.0 | -3,272,470,753,199,274,000 | 37.36747 | 97 | 0.553776 | false | 3.935125 | false | false | false |
outpark/skeleton | migrations/versions/56a9f1dcf35_.py | 1 | 2870 | """empty message
Revision ID: 56a9f1dcf35
Revises: None
Create Date: 2015-05-05 23:24:42.576471
"""
# revision identifiers, used by Alembic.
revision = '56a9f1dcf35'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=80), nullable=False),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('first_name', sa.String(length=30), nullable=True),
sa.Column('last_name', sa.String(length=30), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('questions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('text', sa.String(length=400), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('answers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('text', sa.String(length=400), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('question_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('likes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('answer_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['answer_id'], ['answers.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('likes')
op.drop_table('answers')
op.drop_table('questions')
op.drop_table('roles')
op.drop_table('users')
### end Alembic commands ###
| bsd-3-clause | 2,298,538,285,896,855,600 | 35.794872 | 65 | 0.650523 | false | 3.462002 | false | false | false |
kevingu1003/python-pptx | tests/oxml/parts/test_presentation.py | 4 | 1597 | # encoding: utf-8
"""
Test suite for pptx.oxml.presentation module
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from ..unitdata.presentation import a_sldId, a_sldIdLst
class DescribeCT_SlideIdList(object):
def it_can_add_a_sldId_element_as_a_child(self, add_fixture):
sldIdLst, expected_xml = add_fixture
sldIdLst.add_sldId('rId1')
assert sldIdLst.xml == expected_xml
def it_knows_the_next_available_slide_id(self, next_id_fixture):
sldIdLst, expected_id = next_id_fixture
assert sldIdLst._next_id == expected_id
# fixtures -------------------------------------------------------
@pytest.fixture
def add_fixture(self):
sldIdLst = a_sldIdLst().with_nsdecls().element
expected_xml = (
a_sldIdLst().with_nsdecls('p', 'r').with_child(
a_sldId().with_id(256).with_rId('rId1'))
).xml()
return sldIdLst, expected_xml
@pytest.fixture(params=[
((), 256),
((256,), 257), ((257,), 256), ((300,), 256), ((255,), 256),
((257, 259), 256), ((256, 258), 257), ((256, 257), 258),
((257, 258, 259), 256), ((256, 258, 259), 257),
((256, 257, 259), 258), ((258, 256, 257), 259),
])
def next_id_fixture(self, request):
existing_ids, expected_id = request.param
sldIdLst_bldr = a_sldIdLst().with_nsdecls()
for n in existing_ids:
sldIdLst_bldr.with_child(a_sldId().with_id(n))
sldIdLst = sldIdLst_bldr.element
return sldIdLst, expected_id
| mit | -559,583,534,860,502,140 | 31.591837 | 72 | 0.572949 | false | 3.071154 | false | false | false |
nla/jvmctl | logduct/tests/test_logduct.py | 1 | 1864 | #!/usr/bin/env python2
import os, re, tempfile, shutil, time, signal, unittest, sys
from subprocess import Popen, check_call
def slurp(file):
with open(file) as f:
return f.read()
def wait_until_exists(file, timeout=1, delay=0.02):
start = time.time()
while time.time() < start + timeout:
if os.path.exists(file):
return
time.sleep(delay)
raise Exception("timeout waiting for " + file)
def run_tests(tmpdir):
socket_file = os.path.join(tmpdir, "logductd.sock")
logs_dir = os.path.join(tmpdir, "logs")
daemon = Popen([sys.executable, "-m", "logduct.daemon", "-s", socket_file, "-d", logs_dir, "--trust-blindly"])
unit = "dummyunit"
stdio_log = os.path.join(logs_dir, unit, "stdio.log")
third_log = os.path.join(logs_dir, unit, "third.log")
try:
wait_until_exists(socket_file)
# stdio
check_call([sys.executable, "-m", "logduct.run", "-s", socket_file, "-u", unit, "echo", "hello"])
wait_until_exists(stdio_log)
data = slurp(stdio_log)
match = re.match(r"\d\d:\d\d:\d\d.\d\d\d (unknown|echo)\[\d+\]: hello\n", data)
assert match
# pipe fd
check_call([sys.executable, "-m", "logduct.run", "-s", socket_file, "-u", unit, "--fd", "3:third",
"--no-stdio", "bash", "-c", "echo there >&3"])
wait_until_exists(third_log)
data = slurp(third_log)
match = re.match(r"\d\d:\d\d:\d\d.\d\d\d: there\n", data)
assert match
finally:
daemon.send_signal(signal.SIGTERM)
time.sleep(0.2)
daemon.kill()
def main():
try:
tmpdir = tempfile.mkdtemp("logduct-test")
run_tests(tmpdir)
finally:
shutil.rmtree(tmpdir)
class Test(unittest.TestCase):
def test(self):
main()
if __name__ == '__main__': main()
| mit | -7,207,820,850,151,245,000 | 30.066667 | 114 | 0.571888 | false | 3.117057 | true | false | false |
pvtodorov/indra | indra/sources/sofia/make_sofia_ontology.py | 2 | 1626 | import sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia
# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_ontology(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def build_ontology(ont_json, rdf_path):
G = Graph()
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
if '/' in entry_key:
parent, child = entry_key.split('/', maxsplit=1)
parent_term = sofia_ns.term(parent)
child_term = sofia_ns.term(entry_key)
rel = (child_term, isa, parent_term)
G.add(rel)
save_ontology(G, rdf_path)
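# Added note: based on the parsing above, the ontology JSON is assumed to look
# roughly like
#   {"events": {"conflict/attack": ["raid", "strike"], "conflict": ["war"]}}
# i.e. second-level keys may encode a parent/child path separated by '/'.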
if __name__ == '__main__':
# Path to a SOFIA ontology JSON file
sofia_ont_json_file = sys.argv[1]
with open(sofia_ont_json_file, 'r') as fh:
sofia_ont_json = json.load(fh)
sofia_rdf_path = join(dirname(abspath(sofia.__file__)),
'sofia_ontology.rdf')
G = build_ontology(sofia_ont_json, sofia_rdf_path)
| bsd-2-clause | 6,992,427,790,364,819,000 | 33.595745 | 76 | 0.600246 | false | 3.085389 | false | false | false |
jkroll20/tlgbackend | filtermodules/recent.py | 2 | 1920 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import time
from tlgflaws import *
## A filter that finds all pages that were changed today.
class FRecentlyChanged(FlawFilter):
    shortname= 'RecentlyChanged' # name that identifies the filter (do not translate!)
    label= _('Recently Changed') # label shown next to the checkbox in the frontend
    description= _('Page was touched today.') # longer description text for the tooltip
    group= _('Timeliness') # group this filter is assigned to
    # The Action class for this filter
class Action(TlgAction):
        # execute() filters the pages and puts the results into resultQueue.
def execute(self, resultQueue):
cur= getCursors()[self.wiki]
            # Generate format strings for multiple pages.
format_strings = ','.join(['%s'] * len(self.pageIDs))
            # Start of the current day, in the Wikipedia database format
today= time.strftime( '%Y%m%d000000', time.localtime(time.time()) )
params= []
params.extend(self.pageIDs)
params.append(today)
            # Find the subset of pages that were changed today
cur.execute('SELECT * FROM page WHERE page_id IN (%s) AND page_touched >= %%s' % format_strings, params)
changed= cur.fetchall()
            # Return all pages that were found
for row in changed:
resultQueue.put(TlgResult(self.wiki, row, self.parent))
    # We want to process 100 pages per action.
def getPreferredPagesPerAction(self):
return 100
    # Create an action.
def createActions(self, wiki, pages, actionQueue):
actionQueue.put(self.Action(self, wiki, pages))
# Register the filter when the module is loaded:
#FlawFilters.register(FRecentlyChanged)
| gpl-3.0 | -4,140,035,752,873,094,000 | 38.020408 | 116 | 0.638075 | false | 3.476364 | false | false | false |
googleapis/googleapis-gen | google/cloud/retail/v2/retail-v2-py/google/cloud/retail_v2/services/catalog_service/transports/grpc_asyncio.py | 1 | 13035 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.retail_v2.types import catalog as gcr_catalog
from google.cloud.retail_v2.types import catalog_service
from .base import CatalogServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import CatalogServiceGrpcTransport
class CatalogServiceGrpcAsyncIOTransport(CatalogServiceTransport):
"""gRPC AsyncIO backend transport for CatalogService.
Service for managing catalog configuration.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'retail.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'retail.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_catalogs(self) -> Callable[
[catalog_service.ListCatalogsRequest],
Awaitable[catalog_service.ListCatalogsResponse]]:
r"""Return a callable for the list catalogs method over gRPC.
Lists all the [Catalog][google.cloud.retail.v2.Catalog]s
associated with the project.
Returns:
Callable[[~.ListCatalogsRequest],
Awaitable[~.ListCatalogsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_catalogs' not in self._stubs:
self._stubs['list_catalogs'] = self.grpc_channel.unary_unary(
'/google.cloud.retail.v2.CatalogService/ListCatalogs',
request_serializer=catalog_service.ListCatalogsRequest.serialize,
response_deserializer=catalog_service.ListCatalogsResponse.deserialize,
)
return self._stubs['list_catalogs']
@property
def update_catalog(self) -> Callable[
[catalog_service.UpdateCatalogRequest],
Awaitable[gcr_catalog.Catalog]]:
r"""Return a callable for the update catalog method over gRPC.
Updates the [Catalog][google.cloud.retail.v2.Catalog]s.
Returns:
Callable[[~.UpdateCatalogRequest],
Awaitable[~.Catalog]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_catalog' not in self._stubs:
self._stubs['update_catalog'] = self.grpc_channel.unary_unary(
'/google.cloud.retail.v2.CatalogService/UpdateCatalog',
request_serializer=catalog_service.UpdateCatalogRequest.serialize,
response_deserializer=gcr_catalog.Catalog.deserialize,
)
return self._stubs['update_catalog']
__all__ = (
'CatalogServiceGrpcAsyncIOTransport',
)
| apache-2.0 | -6,470,185,758,696,912,000 | 44.897887 | 87 | 0.612582 | false | 4.700685 | false | false | false |
yarsanich/openprocurement.tender.belowthreshold | openprocurement/tender/belowthreshold/views/tender.py | 1 | 6695 | # -*- coding: utf-8 -*-
from openprocurement.api.utils import context_unpack, json_view, APIResource
from openprocurement.tender.core.utils import (
save_tender, optendersresource, apply_patch
)
from openprocurement.tender.belowthreshold.utils import (
check_status,
)
from openprocurement.tender.core.validation import (
validate_patch_tender_data,
)
@optendersresource(name='belowThreshold:Tender',
path='/tenders/{tender_id}',
procurementMethodType='belowThreshold',
description="Open Contracting compatible data exchange format. See http://ocds.open-contracting.org/standard/r/master/#tender for more info")
class TenderResource(APIResource):
@json_view(permission='view_tender')
def get(self):
"""Tender Read
Get Tender
----------
Example request to get tender:
.. sourcecode:: http
GET /tenders/64e93250be76435397e8c992ed4214d1 HTTP/1.1
Host: example.com
Accept: application/json
This is what one should expect in response:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"data": {
"id": "64e93250be76435397e8c992ed4214d1",
"tenderID": "UA-64e93250be76435397e8c992ed4214d1",
"dateModified": "2014-10-27T08:06:58.158Z",
"procuringEntity": {
"id": {
"name": "Державне управління справами",
"scheme": "https://ns.openprocurement.org/ua/edrpou",
"uid": "00037256",
"uri": "http://www.dus.gov.ua/"
},
"address": {
"countryName": "Україна",
"postalCode": "01220",
"region": "м. Київ",
"locality": "м. Київ",
"streetAddress": "вул. Банкова, 11, корпус 1"
}
},
"value": {
"amount": 500,
"currency": "UAH",
"valueAddedTaxIncluded": true
},
"itemsToBeProcured": [
{
"description": "футляри до державних нагород",
"primaryClassification": {
"scheme": "CPV",
"id": "44617100-9",
"description": "Cartons"
},
"additionalClassification": [
{
"scheme": "ДКПП",
"id": "17.21.1",
"description": "папір і картон гофровані, паперова й картонна тара"
}
],
"unitOfMeasure": "item",
"quantity": 5
}
],
"enquiryPeriod": {
"endDate": "2014-10-31T00:00:00"
},
"tenderPeriod": {
"startDate": "2014-11-03T00:00:00",
"endDate": "2014-11-06T10:00:00"
},
"awardPeriod": {
"endDate": "2014-11-13T00:00:00"
},
"deliveryDate": {
"endDate": "2014-11-20T00:00:00"
},
"minimalStep": {
"amount": 35,
"currency": "UAH"
}
}
}
"""
if self.request.authenticated_role == 'chronograph':
tender_data = self.context.serialize('chronograph_view')
else:
tender_data = self.context.serialize(self.context.status)
return {'data': tender_data}
@json_view(content_type="application/json", validators=(validate_patch_tender_data, ), permission='edit_tender')
def patch(self):
"""Tender Edit (partial)
For example here is how procuring entity can change number of items to be procured and total Value of a tender:
.. sourcecode:: http
PATCH /tenders/4879d3f8ee2443169b5fbbc9f89fa607 HTTP/1.1
Host: example.com
Accept: application/json
{
"data": {
"value": {
"amount": 600
},
"itemsToBeProcured": [
{
"quantity": 6
}
]
}
}
And here is the response to be expected:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
{
"data": {
"id": "4879d3f8ee2443169b5fbbc9f89fa607",
"tenderID": "UA-64e93250be76435397e8c992ed4214d1",
"dateModified": "2014-10-27T08:12:34.956Z",
"value": {
"amount": 600
},
"itemsToBeProcured": [
{
"quantity": 6
}
]
}
}
"""
tender = self.context
if self.request.authenticated_role != 'Administrator' and tender.status in ['complete', 'unsuccessful', 'cancelled']:
self.request.errors.add('body', 'data', 'Can\'t update tender in current ({}) status'.format(tender.status))
self.request.errors.status = 403
return
if self.request.authenticated_role == 'chronograph':
apply_patch(self.request, save=False, src=self.request.validated['tender_src'])
check_status(self.request)
save_tender(self.request)
else:
apply_patch(self.request, src=self.request.validated['tender_src'])
self.LOGGER.info('Updated tender {}'.format(tender.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_patch'}))
return {'data': tender.serialize(tender.status)}
| apache-2.0 | -4,368,081,525,457,636,000 | 35.882022 | 160 | 0.436405 | false | 4.313403 | false | false | false |
atlassian/dd-agent | checks.d/ntp.py | 34 | 1756 | # 3p
import ntplib
# project
from checks import AgentCheck
from utils.ntp import get_ntp_args, set_user_ntp_settings
DEFAULT_OFFSET_THRESHOLD = 60 # in seconds
class NtpCheck(AgentCheck):
DEFAULT_MIN_COLLECTION_INTERVAL = 900 # in seconds
def check(self, instance):
service_check_msg = None
offset_threshold = instance.get('offset_threshold', DEFAULT_OFFSET_THRESHOLD)
try:
offset_threshold = int(offset_threshold)
except (TypeError, ValueError):
raise Exception('Must specify an integer value for offset_threshold. Configured value is %s' % repr(offset_threshold))
set_user_ntp_settings(dict(instance))
req_args = get_ntp_args()
self.log.debug("Using ntp host: {0}".format(req_args['host']))
try:
ntp_stats = ntplib.NTPClient().request(**req_args)
except ntplib.NTPException:
self.log.debug("Could not connect to NTP Server {0}".format(
req_args['host']))
status = AgentCheck.UNKNOWN
ntp_ts = None
else:
ntp_offset = ntp_stats.offset
# Use the ntp server's timestamp for the time of the result in
# case the agent host's clock is messed up.
ntp_ts = ntp_stats.recv_time
self.gauge('ntp.offset', ntp_offset, timestamp=ntp_ts)
if abs(ntp_offset) > offset_threshold:
status = AgentCheck.CRITICAL
service_check_msg = "Offset {0} secs higher than offset threshold ({1} secs)".format(ntp_offset, offset_threshold)
else:
status = AgentCheck.OK
self.service_check('ntp.in_sync', status, timestamp=ntp_ts, message=service_check_msg)
| bsd-3-clause | 7,355,940,819,758,120,000 | 34.12 | 130 | 0.617312 | false | 4.027523 | false | false | false |
pichillilorenzo/JavaScriptEnhancements | src/listeners/show_flow_errors.py | 1 | 10428 | import sublime, sublime_plugin
import cgi, time, os
from ..libs import NodeJS
from ..libs import FlowCLI
from ..libs import flow
from ..libs import util
from ..libs.popup_manager import popup_manager
from .wait_modified_async import JavascriptEnhancementsWaitModifiedAsyncViewEventListener
show_flow_errors_css = ""
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "show_flow_errors.css"), encoding="utf-8") as css_file:
show_flow_errors_css = "<style>"+css_file.read()+"</style>"
class JavascriptEnhancementsShowFlowErrorsViewEventListener(JavascriptEnhancementsWaitModifiedAsyncViewEventListener, sublime_plugin.ViewEventListener):
description_by_row_column = {}
diagnostics = {
"error": [],
"warning": []
}
diagnostic_regions = {
"error": [],
"warning": []
}
diagnostic_scope = {
"error": "storage",
"warning": "keyword"
}
callback_setted_use_flow_checker_on_current_view = False
prefix_thread_name = "javascript_enhancements_show_flow_errors_view_event_listener"
wait_time = .15
modified = False
def on_load_async(self):
self.on_modified_async()
def on_activated_async(self):
self.on_modified_async()
def on_modified(self):
self.modified = True
def on_modified_async(self):
super(JavascriptEnhancementsShowFlowErrorsViewEventListener, self).on_modified_async()
def on_selection_modified_async(self):
view = self.view
sel = view.sel()[0]
if view.find_by_selector('source.js.embedded.html') and (self.diagnostics["error"] or self.diagnostics["warning"] or view.get_regions("javascript_enhancements_flow_error") or view.get_regions("javascript_enhancements_flow_warning")):
pass
elif not util.selection_in_js_scope(view) or not self.are_there_errors():
flow.hide_errors(view)
return
for key, value in self.diagnostics.items():
if not value and not view.get_regions("javascript_enhancements_flow_error"):
flow.hide_errors(view, level=key)
error_region = None
error_level = ""
for region in view.get_regions("javascript_enhancements_flow_error"):
if region.contains(sel):
error_region = region
error_level = "error"
break
if not error_region:
for region in view.get_regions("javascript_enhancements_flow_warning"):
if region.contains(sel):
error_region = region
error_level = "warning"
break
if not self.can_check():
return
error_description = ""
if error_region:
row_region, col_region = view.rowcol(error_region.begin())
end_row_region, endcol_region = view.rowcol(error_region.end())
try :
error_description = self.description_by_row_column[str(row_region)+":"+str(end_row_region)+":"+str(col_region)+":"+str(endcol_region)+":"+error_level]
except KeyError as e:
if str(row_region+1)+":"+str(row_region+1)+":0:0:"+error_level in self.description_by_row_column:
error_description = self.description_by_row_column[str(row_region+1)+":"+str(row_region+1)+":0:0:"+error_level]
for key, value in self.diagnostics.items():
if value:
error_count = len(value)
error_count_text = 'Flow: {} {}{}'.format(
                    error_count, key, '' if error_count == 1 else 's'
)
if error_level == key and error_region:
view.set_status(
'javascript_enhancements_flow_' + key, error_count_text + ': ' + error_description
)
else:
view.set_status('javascript_enhancements_flow_' + key, error_count_text)
def on_modified_async_with_thread(self, recheck=True):
self.modified = False
view = self.view
if view.find_by_selector('source.js.embedded.html'):
pass
elif not util.selection_in_js_scope(view):
flow.hide_errors(view)
return
if not self.can_check():
return
self.wait()
flow_cli = FlowCLI(view)
result = flow_cli.check_contents()
self.diagnostics = {
"error": [],
"warning": []
}
self.diagnostic_regions = {
"error": [],
"warning": []
}
self.description_by_row_column = {}
if result[0] and len(result[1]['errors']) > 0:
for error in result[1]['errors']:
description = ''
operation = error.get('operation')
row = -1
error_level = error['level']
self.diagnostics[error_level].append(error)
for i in range(len(error['message'])):
message = error['message'][i]
# check if the error path is the same file opened on the current view.
# this check is done because sometimes flow put errors from other files (for example when defining new flow definitions)
if message['path'] and message['path'] != view.file_name():
continue
if i == 0 :
row = int(message['line']) - 1
endrow = int(message['endline']) - 1
col = int(message['start']) - 1
endcol = int(message['end'])
self.diagnostic_regions[error_level].append(util.rowcol_to_region(view, row, endrow, col, endcol))
if operation:
description += operation["descr"]
if not description :
description += "'"+message['descr']+"'"
else :
description += " " + message['descr']
if row >= 0 :
self.description_by_row_column[str(row)+":"+str(endrow)+":"+str(col)+":"+str(endcol)+":"+error_level] = description
if not self.modified :
need_update_sublime_status = False
for key, value in self.diagnostic_regions.items():
view.erase_regions('javascript_enhancements_flow_' + key)
if value:
view.add_regions( 'javascript_enhancements_flow_' + key, value, self.diagnostic_scope[key], 'dot', sublime.DRAW_SQUIGGLY_UNDERLINE | sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE )
if not need_update_sublime_status:
need_update_sublime_status = True
else:
view.erase_status("javascript_enhancements_flow_" + key)
if need_update_sublime_status:
self.on_selection_modified_async()
elif (recheck) :
sublime.set_timeout_async(lambda: self.on_modified_async_with_thread(recheck=False))
def on_hover(self, point, hover_zone) :
view = self.view
if view.find_by_selector('source.js.embedded.html') and (self.diagnostics["error"] or self.diagnostics["warning"] or view.get_regions("javascript_enhancements_flow_error") or view.get_regions("javascript_enhancements_flow_warning")):
pass
elif not util.selection_in_js_scope(view) or not self.are_there_errors():
flow.hide_errors(view)
return
for key, value in self.diagnostics.items():
if not value and not view.get_regions("javascript_enhancements_flow_error"):
flow.hide_errors(view, level=key)
if hover_zone != sublime.HOVER_TEXT :
return
sel = sublime.Region(point, point)
is_hover_error = False
region_hover_error = None
error_level = ""
for region in view.get_regions("javascript_enhancements_flow_error"):
if region.contains(sel):
region_hover_error = region
is_hover_error = True
error_level = "error"
break
if not is_hover_error:
for region in view.get_regions("javascript_enhancements_flow_warning"):
if region.contains(sel):
region_hover_error = region
is_hover_error = True
error_level = "warning"
break
if not is_hover_error:
return
if not self.can_check():
return
row_region, col_region = view.rowcol(region_hover_error.begin())
end_row_region, endcol_region = view.rowcol(region_hover_error.end())
error = None
try :
error = self.description_by_row_column[str(row_region)+":"+str(end_row_region)+":"+str(col_region)+":"+str(endcol_region)+":"+error_level]
except KeyError as e:
if str(row_region+1)+":"+str(row_region+1)+":0:0:"+error_level in self.description_by_row_column:
error = self.description_by_row_column[str(row_region+1)+":"+str(row_region+1)+":0:0:"+error_level]
if error:
text = cgi.escape(error).split(" ")
html = ""
i = 0
while i < len(text) - 1:
html += text[i] + " " + text[i+1] + " "
i += 2
if i % 10 == 0 :
html += " <br> "
if len(text) % 2 != 0 :
html += text[len(text) - 1]
row_region, col_region = view.rowcol(region_hover_error.begin())
end_row_region, endcol_region = view.rowcol(region_hover_error.end())
# here the css code for the <a> element is not working, so the style is inline.
popup_manager.set_visible("javascript_enhancements_flow_" + error_level, True)
view.show_popup("""
<html>
<body>
""" + show_flow_errors_css + """
""" + html + """
<br>
<a style="display: block; margin-top: 10px; color: #333;" class="copy-to-clipboard" href="copy_to_clipboard">Copy</a>
</body>
</html>""", sublime.HIDE_ON_MOUSE_MOVE_AWAY, point, 1150, 80, lambda action: sublime.set_clipboard(error) or view.hide_popup(), lambda: popup_manager.set_visible("javascript_enhancements_flow_" + error_level, False) )
def can_check(self):
view = self.view
settings = util.get_project_settings()
if settings :
if not settings["project_settings"]["flow_checker_enabled"] or not util.is_project_view(view) :
flow.hide_errors(view)
return False
elif settings["project_settings"]["flow_checker_enabled"] :
comments = view.find_by_selector('source.js comment')
flow_comment_found = False
for comment in comments:
if "@flow" in view.substr(comment) :
flow_comment_found = True
break
if not flow_comment_found :
flow.hide_errors(view)
return False
elif not view.settings().get("javascript_enhancements_use_flow_checker_on_current_view") :
flow.hide_errors(view)
return False
return True
def are_there_errors(self):
view = self.view
return True if self.diagnostics["error"] or self.diagnostics["warning"] or view.get_regions("javascript_enhancements_flow_error") or view.get_regions("javascript_enhancements_flow_warning") else False | mit | -838,735,255,108,730,000 | 33.305921 | 237 | 0.625623 | false | 3.565128 | false | false | false |
dimara/synnefo | snf-astakos-app/astakos/im/views/target/redirect.py | 7 | 4777 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils.http import urlencode
from django.contrib.auth import authenticate
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseForbidden)
from django.core.exceptions import ValidationError
from django.views.decorators.http import require_http_methods
from urlparse import urlunsplit, urlsplit, parse_qsl
from astakos.im.util import restrict_next
from astakos.im.user_utils import login as auth_login, logout
from astakos.im.views.decorators import cookie_fix
import astakos.im.messages as astakos_messages
from astakos.im.settings import REDIRECT_ALLOWED_SCHEMES
import logging
logger = logging.getLogger(__name__)
@require_http_methods(["GET"])
@cookie_fix
def login(request):
"""
If there is no `next` request parameter returns 400 (BAD REQUEST).
Otherwise, if `next` request parameter is not among the allowed schemes,
returns 403 (Forbidden).
If the request user is authenticated and has signed the approval terms,
redirects to `next` request parameter. If not, redirects to approval terms
in order to return back here after agreeing with the terms.
Otherwise, redirects to login in order to return back here after successful
login.
"""
next = request.GET.get('next')
if not next:
return HttpResponseBadRequest('Missing next parameter')
if not restrict_next(next, allowed_schemes=REDIRECT_ALLOWED_SCHEMES):
return HttpResponseForbidden(_(
astakos_messages.NOT_ALLOWED_NEXT_PARAM))
force = request.GET.get('force', None)
response = HttpResponse()
if force == '' and request.user.is_authenticated():
logout(request)
if request.user.is_authenticated():
# if user has not signed the approval terms
# redirect to approval terms with next the request path
if not request.user.signed_terms:
# first build next parameter
parts = list(urlsplit(request.build_absolute_uri()))
params = dict(parse_qsl(parts[3], keep_blank_values=True))
parts[3] = urlencode(params)
next = urlunsplit(parts)
# build url location
parts[2] = reverse('latest_terms')
params = {'next': next}
parts[3] = urlencode(params)
url = urlunsplit(parts)
response['Location'] = url
response.status_code = 302
return response
renew = request.GET.get('renew', None)
if renew == '':
request.user.renew_token(
flush_sessions=True,
current_key=request.session.session_key
)
try:
request.user.save()
except ValidationError, e:
return HttpResponseBadRequest(e)
# authenticate before login
user = authenticate(
username=request.user.username,
auth_token=request.user.auth_token
)
auth_login(request, user)
logger.info('Token reset for %s' % user.username)
parts = list(urlsplit(next))
parts[3] = urlencode({
'uuid': request.user.uuid,
'token': request.user.auth_token
})
url = urlunsplit(parts)
response['Location'] = url
response.status_code = 302
return response
else:
# redirect to login with next the request path
# first build next parameter
parts = list(urlsplit(request.build_absolute_uri()))
params = dict(parse_qsl(parts[3], keep_blank_values=True))
# delete force parameter
if 'force' in params:
del params['force']
parts[3] = urlencode(params)
next = urlunsplit(parts)
# build url location
parts[2] = reverse('login')
params = {'next': next}
parts[3] = urlencode(params)
url = urlunsplit(parts)
response['Location'] = url
response.status_code = 302
return response
| gpl-3.0 | -4,838,227,677,632,817,000 | 36.614173 | 79 | 0.65313 | false | 4.358577 | false | false | false |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/astroid/astroid/__pkginfo__.py | 1 | 1947 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2014-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2017 Ceridwen <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radosław Ganczarek <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017 Hugo <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 Calen Pennington <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2019 Uilian Ries <[email protected]>
# Copyright (c) 2019 Thomas Hisch <[email protected]>
# Copyright (c) 2020-2021 hippo91 <[email protected]>
# Copyright (c) 2020 David Gilman <[email protected]>
# Copyright (c) 2020 Konrad Weihmann <[email protected]>
# Copyright (c) 2020 Felix Mölder <[email protected]>
# Copyright (c) 2020 Michael <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/LICENSE
"""astroid packaging information"""
from typing import Optional
__version__ = "2.5.6"
# For an official release, use 'alpha_version = False' and 'dev_version = None'
alpha_version: bool = False # Release will be an alpha version if True (ex: '1.2.3a6')
dev_version: Optional[int] = None
if dev_version is not None:
if alpha_version:
__version__ += f"a{dev_version}"
else:
__version__ += f".dev{dev_version}"
version = __version__
| mit | 9,205,893,398,048,571,000 | 45.261905 | 87 | 0.726711 | false | 2.625676 | false | false | false |
PROSIC/prosic-evaluation | scripts/plot-fdr-control.py | 1 | 1781 | from itertools import product
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
import common
import numpy as np
MIN_CALLS = 100
colors = common.get_colors(snakemake.config)
props = product(snakemake.params.callers,
snakemake.params.len_ranges, snakemake.params.fdrs)
calls = []
for _calls, (caller, len_range, fdr) in zip(snakemake.input.varlociraptor_calls, props):
calls.append({"caller": caller, "len_range": len_range, "fdr": float(fdr), "calls": _calls})
calls = pd.DataFrame(calls)
calls = calls.set_index("caller", drop=False)
def plot_len_range(minlen, maxlen):
def plot(caller):
color = colors[caller]
label = "varlociraptor+{}".format(caller)
fdrs = []
alphas = []
calls_ = calls.loc[caller]
calls_ = calls_[calls_["len_range"].map(lambda r: r == [minlen, maxlen])]
calls_ = calls_.sort_values("fdr")
for e in calls_.itertuples():
c = pd.read_table(e.calls)
n = c.shape[0]
if n < MIN_CALLS:
continue
true_fdr = 1.0 - common.precision(c)
if fdrs and fdrs[-1] == true_fdr:
continue
fdrs.append(true_fdr)
alphas.append(e.fdr)
plt.plot(alphas, fdrs, ".-", color=color, label=label)
for caller in calls.index.unique():
plot(caller)
plt.plot([0, 1], [0, 1], ":", color="grey")
sns.despine()
ax = plt.gca()
handles, _ = ax.get_legend_handles_labels()
return ax, handles
common.plot_ranges(
snakemake.params.len_ranges,
plot_len_range,
xlabel="FDR threshold",
ylabel="true FDR")
plt.savefig(snakemake.output[0], bbox_inches="tight")
| mit | 1,726,927,584,657,452,300 | 25.984848 | 96 | 0.606401 | false | 3.232305 | false | false | false |
RedHatInsights/insights-core | insights/parsers/tests/test_jboss_standalone_main_conf.py | 1 | 3625 | from insights.parsers.jboss_standalone_main_conf import JbossStandaloneConf
from insights.tests import context_wrap
from insights.parsers import jboss_standalone_main_conf
import doctest
JBOSS_STANDALONE_CONFIG = """
<?xml version='1.0' encoding='UTF-8'?>
<server xmlns="urn:jboss:domain:1.7">
<management>
<security-realms>
<security-realm name="ManagementRealm">
<authentication>
<local default-user="$local" skip-group-loading="true"/>
<properties path="mgmt-users.properties" relative-to="jboss.server.config.dir"/>
</authentication>
<authorization map-groups-to-roles="false">
<properties path="mgmt-groups.properties" relative-to="jboss.server.config.dir"/>
</authorization>
</security-realm>
<security-realm name="ApplicationRealm">
<authentication>
<local default-user="$local" allowed-users="*" skip-group-loading="true"/>
<properties path="application-users.properties" relative-to="jboss.server.config.dir"/>
</authentication>
<authorization>
<properties path="application-roles.properties" relative-to="jboss.server.config.dir"/>
</authorization>
</security-realm>
</security-realms>
<audit-log>
<formatters>
<json-formatter name="json-formatter"/>
</formatters>
<handlers>
<file-handler name="file" formatter="json-formatter" path="audit-log.log" relative-to="jboss.server.data.dir"/>
</handlers>
<logger log-boot="true" log-read-only="false" enabled="false">
<handlers>
<handler name="file"/>
</handlers>
</logger>
</audit-log>
<management-interfaces>
<native-interface security-realm="ManagementRealm">
<socket-binding native="management-native"/>
</native-interface>
<http-interface security-realm="ManagementRealm">
<socket-binding http="management-http"/>
</http-interface>
</management-interfaces>
<access-control provider="simple">
<role-mapping>
<role name="SuperUser">
<include>
<user name="$local"/>
</include>
</role>
</role-mapping>
</access-control>
</management>
</server>
"""
def test_jboss_standalone_conf():
jboss_standalone_conf = JbossStandaloneConf(
context_wrap(JBOSS_STANDALONE_CONFIG, path="/root/jboss/jboss-eap-6.4/standalone/configuration/standalone.xml"))
assert jboss_standalone_conf is not None
assert jboss_standalone_conf.file_path == "/root/jboss/jboss-eap-6.4/standalone/configuration/standalone.xml"
assert jboss_standalone_conf.get_elements(
".//management/security-realms/security-realm/authentication/properties")[0].get(
"relative-to") == 'jboss.server.config.dir'
def test_jboss_standalone_conf_doc_examples():
env = {
'JbossStandaloneConf': JbossStandaloneConf,
'jboss_main_config': JbossStandaloneConf(context_wrap(JBOSS_STANDALONE_CONFIG,
path='/root/jboss/jboss-eap-6.4/standalone/configuration/standalone.xml'))
}
failed, total = doctest.testmod(jboss_standalone_main_conf, globs=env)
assert failed == 0
| apache-2.0 | -3,273,449,470,013,918,000 | 42.674699 | 136 | 0.586207 | false | 4.200463 | true | false | false |
churchill-lab/qtl-viewer | qtlviewer/test_cache.py | 1 | 1606 | import requests
# app, LOG and the cached session used below are not defined in this snippet;
# they are assumed to be a Flask app, a module logger and requests_cache
# (implied by @app.route, Response/request and r.from_cache). The cache name
# is arbitrary.
import logging
import requests_cache
from flask import Flask, Response, request
requests_cache.install_cache('test_cache')
app = Flask(__name__)
LOG = logging.getLogger(__name__)
def split_url(url):
"""Splits the given URL into a tuple of (protocol, host, uri)"""
proto, rest = url.split(':', 1)
rest = rest[2:].split('/', 1)
host, uri = (rest[0], rest[1]) if len(rest) == 2 else (rest[0], "")
return (proto, host, uri)
def parse_url(url):
"""Parses out Referer info indicating the request is from a previously proxied page.
For example, if:
Referer: http://localhost:8080/proxy/google.com/search?q=foo
then the result is:
("google.com", "search?q=foo")
"""
proto, host, uri = split_url(url)
print ("url={}".format(url))
print ("{}, {}, {}".format(proto, host, uri))
if uri.find("/") < 0:
return None
first, rest = uri.split("/", 1)
return {'proto':proto, 'host':host, 'uri':uri, 'url':rest}
@app.route('/proxy/<path:url>', methods=['GET'])
def proxy_get(url):
rd = parse_url(request.url)
print("rd=", str(rd))
#print ('request.path={}'.format(request.path))
#print ('request.full_path={}'.format(request.full_path))
#print ('request.script_root={}'.format(request.script_root))
#print ('request.base_url={}'.format(request.base_url))
#print ('request.url={}'.format(request.url))
#print ('request.url_root={}'.format(request.url_root))
LOG.debug("Fetching {}".format(url))
#_params = {} if params is None else params
r = requests.get(rd['url']) #, params=_params)
print("From cache: {}".format(r.from_cache))
print('headers=', dict(r.headers))
return Response(r.content, headers=dict(r.headers)), r.status_code
| gpl-3.0 | -3,121,924,115,229,837,000 | 28.740741 | 88 | 0.602117 | false | 3.352818 | false | false | false |
AntaresConsulting/odoo-marble | product_marble/models/_common.py | 1 | 12388 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import osv
from openerp.osv import fields
import logging
_logger = logging.getLogger(__name__)
# import pdb; pdb.set_trace()
# import vimpdb; vimpdb.hookPdb()
# set global properties...
RAW = 'raw'
BACHA = 'bacha'
SERVICE = 'service'
INPUT = 'input'
OTHER = '*'
M2 = 'm2'
AREA = 'area'
UNITS = 'units'
LOC_DESPACHO = 'loc_despacho'
LOC_STOCK = 'stock'
LOC_REC_STOCK = 'rec_stock'
LOC_OWN_STOCK = 'own'
LOC_CUSTOMERS = 'customers'
MAIN_COMPANY = 'company'
_xml_data = {
# ---- Prod Categ -----
RAW : 'product_marble.prod_categ_raw_material',
BACHA : 'product_marble.prod_categ_bachas',
SERVICE: 'product_marble.prod_categ_services',
INPUT : 'product_marble.prod_categ_inputs',
# ---- Prod UOM -----
M2 : 'product_marble.product_uom_square_meter',
AREA : 'product_marble.product_uom_categ_area',
UNITS : 'product.product_uom_categ_unit',
# ---- Warehouse location Stock -----
LOC_DESPACHO : 'product_marble.location_deposito_despacho',
LOC_STOCK : 'product_marble.location_deposito_stock_propio',
LOC_OWN_STOCK : 'product_marble.location_deposito_stock_propio',
LOC_CUSTOMERS : 'product_marble.location_deposito_stock_clientes',
LOC_REC_STOCK : 'product_marble.location_deposito_stock_propio_recortes',
# ---- Base Company -----
MAIN_COMPANY : 'base.main_company',
}
_prop = {}
@api.model
def set_prop(self):
global _prop
for key in _xml_data:
xml_id = _xml_data[key]
if not _prop.get(key) or _prop.get(key) < 0:
ids = self.env.ref(xml_id)
_prop[key] = ids.id if ids and ids.id > 0 else -1
#_logger.info(">> set_prop >> _prop = %s", _prop)
@api.model
def get_prod_types(self):
_logger.info(">> get_prod_type >> 1- self = %s", self)
types = {
get_prop(self, RAW) : RAW,
get_prop(self, BACHA) : BACHA,
get_prop(self, SERVICE) : SERVICE,
get_prop(self, INPUT) : INPUT,
}
_logger.info(">> get_prod_type >> 2- types = %s", types)
return types
# --- Migration -------------------------
#
# def get_prop(self, cr, uid, key):
# global _prop
# if not _prop.get(key):
# set_prop(self, cr, uid, [])
# return _prop[key]
@api.model
def get_prop(self, key):
global _prop
    # validate that the cached ids belong to the current db...
db = 'db_name'
db_name = _prop.get(db,False)
if (not db_name or db_name != self._cr.dbname):
_prop.clear()
_prop[db] = self._cr.dbname
#
if not _prop.get(key):
set_prop(self)
return _prop[key]
# ----------------------------------------
# --- Migration -------------------------
#
# def get_raw_material_id(self, cr, uid):
# return get_prop(self, cr, uid, 'raw_material_id')
#
# def get_bachas_id(self, cr, uid):
# return get_prop(self, cr, uid, 'bachas_id')
#
# def get_services_id(self, cr, uid):
# return get_prop(self, cr, uid, 'services_id')
#
# def get_inputs_id(self, cr, uid):
# return get_prop(self, cr, uid, 'inputs_id')
#
# def get_uom_m2_id(self, cr, uid):
# return get_prop(self, cr, uid, 'uom_m2_id')
#
# def get_uom_units_id(self, cr, uid):
# return get_prop(self, cr, uid, 'uom_units_id')
@api.model
def get_raw_material_id(self):
return get_prop(self, RAW)
@api.model
def get_bachas_id(self):
return get_prop(self, BACHA)
@api.model
def get_services_id(self):
return get_prop(self, SERVICE)
@api.model
def get_inputs_id(self):
return get_prop(self, INPUT)
@api.model
def get_uom_m2_id(self):
return get_prop(self, M2)
@api.model
def get_uom_units_id(self):
return get_prop(self, UNITS)
@api.model
def get_location_despacho(self):
return get_prop(self, LOC_DESPACHO)
@api.model
def get_location_stock(self):
return get_prop(self, LOC_STOCK)
@api.model
def get_location_recortes_stock(self):
return get_prop(self, LOC_REC_STOCK)
@api.model
def get_location_own_id(self):
return get_prop(self, LOC_OWN_STOCK)
@api.model
def get_location_customers_id(self):
return get_prop(self, LOC_CUSTOMERS)
@api.model
def get_main_company_id(self):
return get_prop(self, MAIN_COMPANY)
# ----------------------------------------
# --- Migration -------------------------
#
# def is_raw_material(self, cr, uid, cid):
# return (cid == get_prop(self, cr, uid, RAW))
#
# def is_bachas(self, cr, uid, cid):
# return (cid == get_prop(self, cr, uid, BACHAS))
#
# def is_services(self, cr, uid, cid):
# return (cid == get_prop(self, cr, uid, SERVICES))
#
# def is_inputs(self, cr, uid, cid):
# return (cid == get_prop(self, cr, uid, INPUTS))
@api.model
def is_raw_material(self, cid):
return (cid == get_prop(self, RAW))
@api.model
def is_bachas(self, cid):
return (cid == get_prop(self, BACHA))
@api.model
def is_services(self, cid):
return (cid == get_prop(self, SERVICE))
@api.model
def is_inputs(self, cid):
return (cid == get_prop(self, INPUT))
# ----------------------------------------
# def get_raw_material_id(self, cr, uid):
# # ids = self.pool.get('product.category').search(cr, uid, [('name','ilike','Marble Work')], limit=1)
# # return ids[0] or False
# return get_raw_material_id(self, cr, uid)
# def get_product_uom_m2_id(self, cr, uid):
# global _prop
# key = 'uom_m2_id'
#
# if (not _prop) or (_prop.get(key) < 0):
# obj = self.pool.get('product.uom')
# ids = obj.search(cr, uid, [('name','ilike','m2')], limit=1)
# _prop[key] = ids[0] if ids and ids[0] > 0 else -1
#
# # _logger.info("3 >> get_product_uom_m2_id >> _prop = %s", _prop)
# return _prop[key]
def is_raw_material_by_category_id(self, cr, uid, ids):
"""
    - Obj: Determine for each Category in [ids] whether it is Marble Work,
           i.e. whether it belongs to the Marble Work category or not...
    - Inp: [ids] = list of category_id.
    - Out: {category_id: true/false, ..}
"""
result = {}
if not ids:
return result
marble_work_id = get_raw_material_id(self, cr, uid)
result = {c:(c == marble_work_id) for c in ids}
# _logger.info("1 >> is_raw_material_by_category_id >> result = %s", result)
return result
def is_raw_material_by_product_id(self, cr, uid, ids):
"""
    - Obj: Determine for each product in [ids]
           whether it belongs to the Marble Work category or not...
    - Inp: [ids] = list of product ids.
    - Out: {prod_id: is_marble, ..}
"""
result = {}
if not ids:
return result
marble_work_id = get_raw_material_id(self, cr, uid)
obj = self.pool.get('product.product')
for p in obj.read(cr, uid, ids, ['categ_id']):
result.update({p['id']: (marble_work_id == p['categ_id'][0])})
# _logger.info("1 >> is_raw_material_by_product_id >> result = %s", result)
return result
def is_bacha_by_product_id(self, cr, uid, ids):
result = {}
if not ids:
return result
bacha_id = get_bachas_id(self, cr, uid)
obj = self.pool.get('product.product')
for p in obj.read(cr, uid, ids, ['categ_id']):
result.update({p['id']: (bacha_id == p['categ_id'][0])})
# _logger.info("1 >> is_raw_material_by_product_id >> result = %s", result)
return result
def is_input_by_product_id(self, cr, uid, ids):
result = {}
if not ids:
return result
input_id = get_inputs_id(self, cr, uid)
obj = self.pool.get('product.product')
for p in obj.read(cr, uid, ids, ['categ_id']):
result.update({p['id']: (input_id == p['categ_id'][0])})
# _logger.info("1 >> is_raw_material_by_product_id >> result = %s", result)
return result
def is_service_by_product_id(self, cr, uid, ids):
result = {}
if not ids:
return result
service_id = get_services_id(self, cr, uid)
obj = self.pool.get('product.product')
for p in obj.read(cr, uid, ids, ['categ_id']):
result.update({p['id']: (service_id == p['categ_id'][0])})
# _logger.info("1 >> is_raw_material_by_product_id >> result = %s", result)
return result
# def get_data(self, cr, uid, list_tuple, fid):
# """
#     - Obj: Retrieve the 'value' matching 'fid' (find id) in list_tuple...
#     - Inp:
#        arg 4: [list_tuple] = list of tuples: [(id, value to display), ..]
#        arg 5: [fid] = 'fid' to locate in 'list_tuple'.
#     - Out: the 'value' referenced by 'fid'.
# """
# if not list_tuple or not fid:
# return ""
#
# return dict(list_tuple).get(fid)
# ---------- Stock ----------
def query_stock_move_input(self, cr, uid):
str = "\n\n >>> Stock Move Input <<<\n"
obj = self.pool.get('stock.move')
domain = [
'&','|',
'&',
('picking_id','=',False),
('location_id.usage', 'in', ['customer','supplier']),
'&',
('picking_id','!=',False),
('picking_id.type','=','in'),
('plaque_id','>','0')
]
ids = obj.search(cr, uid, domain)
_logger.info(">> query_stock_input >> 1 >> ids = %s", ids)
for m in obj.browse(cr, uid, ids):
str += "%s - %s - %s - %s - %s \n" % (m.id, m.product_uom, m.plaque_id, m.plaque_qty, m.name)
_logger.info(str)
return True
def query_stock_move_output(self, cr, uid):
str = "\n\n >>> Stock Move Output <<<\n"
obj = self.pool.get('stock.move')
domain = [
'&','|',
'&',
('picking_id','=',False),
('location_dest_id.usage', 'in', ['customer','supplier']),
'&',
('picking_id','!=',False),
('picking_id.type','=','out'),
('plaque_id','>','0')
]
ids = obj.search(cr, uid, domain)
_logger.info(">> query_stock_input >> 2 >> ids = %s", ids)
for m in obj.browse(cr, uid, ids):
str += "%s - %s - %s - %s - %s \n" % (m.id, m.product_uom, m.plaque_id, m.plaque_qty, m.name)
_logger.info(str)
return True
def get_stock_move_by_product(self, cr, uid, ids):
_logger.info(">> get_stock_move_by_product >> 000 >> ids = %s", ids)
str = "\n\n >>> Stock Move In/Out by Product <<<\n"
obj = self.pool.get('stock.move')
domain = [
        # ('product_id.categ_id','=', get_raw_material_id(self, cr, uid)), << marble-type product.
('product_id', 'in', ids),
]
_logger.info(">> get_stock_move_by_product >> 111 >> domain = %s", domain)
ids = obj.search(cr, uid, domain)
_logger.info(">> get_stock_move_by_product >> 222 >> ids = %s", ids)
for m in obj.browse(cr, uid, ids):
str += "%s - %s - %s - %s - %s \n" % (m.id, m.product_uom, m.plaque_id, m.plaque_qty, m.name)
_logger.info(str)
return True
def query_stock_move_test(self, cr, uid):
query_stock_move_input(self, cr, uid)
query_stock_move_output(self, cr, uid)
# -------------------------------------------------------
def print_dict(msg, val):
nl = '\n'
res = msg + nl + nl
for k in val:
res += str(k) + ':' + str(val[k]) + nl
res += nl + nl
_logger.info(res)
# -------------------------------------------------------
def get_loc_parents(self, loc, res):
res += (loc and loc.id and [loc.id]) or []
if loc and loc.location_id:
get_loc_parents(self, loc.location_id, res)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 | 1,740,580,119,350,780,200 | 27.090703 | 105 | 0.557233 | false | 2.941121 | false | false | false |
geoenvo/opendims | opendims/reports/templatetags/reports_tags.py | 1 | 1822 | from django import template
from django.db.models import Sum
from reports.models import Event
register = template.Library()
@register.simple_tag
def get_event_statistics(disaster, month, year):
"""
Return monthly summary of a disaster category and its impact.
"""
events_with_impacts = Event.objects.filter(
eventimpacts__isnull=False, disaster=disaster,
created__month=month, created__year=year
).distinct()
events_with_impacts = events_with_impacts.annotate(
evac_total=Sum('eventimpacts__evac_total'),
affected_total=Sum('eventimpacts__affected_total'),
affected_injury=Sum('eventimpacts__affected_injury'),
affected_death=Sum('eventimpacts__affected_death'),
loss_total=Sum('eventimpacts__loss_total')
)
return events_with_impacts
@register.simple_tag
def get_eventimpact_total(event_statistics):
"""
Calculate the sum impact of a particular disaster category.
"""
eventimpact_total = {
'evac_total': 0,
'affected_total': 0,
'affected_injury': 0,
'affected_death': 0,
'loss_total': 0
}
for event_statistic in event_statistics:
if event_statistic.evac_total:
eventimpact_total['evac_total'] += event_statistic.evac_total
if event_statistic.affected_total:
eventimpact_total['affected_total'] += event_statistic.affected_total
if event_statistic.affected_injury:
eventimpact_total['affected_injury'] += event_statistic.affected_injury
if event_statistic.affected_death:
eventimpact_total['affected_death'] += event_statistic.affected_death
if event_statistic.loss_total:
eventimpact_total['loss_total'] += event_statistic.loss_total
return eventimpact_total
| gpl-3.0 | 4,937,029,646,918,345,000 | 34.72549 | 83 | 0.670143 | false | 3.651303 | false | false | false |
wzguo/youker-assistant | backends/youker-assistant-daemon/src/policykit/dbusproxy.py | 1 | 2364 | #!/usr/bin/python
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# Copyright (C) 2007-2011 Tualatrix Chou <[email protected]>
# Copyright (C) 2013 ~ 2014 National University of Defense Technology(NUDT) & Kylin Ltd
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import dbus
import dbus.service
import logging
log = logging.getLogger("DbusProxy")
INTERFACE = 'com.ubuntukylin.youker'
UKPATH = '/'
SHOWED = False
def show_message(*args):
from dialogs import ErrorDialog
title = 'Daemon start failed'
    message = ('Youker Assistant system daemon didn\'t start correctly.\n'
               'If you want to help developers debug it, try to run "<b>sudo /usr/lib/python2.7/dist-packages/youker-assistant-daemon/src/start_systemdbus.py</b>" in a terminal.')
ErrorDialog(title=title, message=message).launch()
def nothing(*args):
return None
class DbusProxy:
try:
__system_bus = dbus.SystemBus()
__object = __system_bus.get_object(INTERFACE, UKPATH)
except Exception, e:
__object = None
def __getattr__(self, name):
global SHOWED
try:
return self.__object.get_dbus_method(name, dbus_interface=self.INTERFACE)
except Exception, e:
#log.error(e)
if not SHOWED:
SHOWED = True
return show_message
else:
return nothing
def get_object(self):
return self.__object
class AccessDeniedException(dbus.DBusException):
'''This exception is raised when some operation is not permitted.'''
_dbus_error_name = 'com.ubuntukylin.youker.AccessDeniedException'
def init_dbus(dbus_iface=INTERFACE, dbus_path=UKPATH):
'''init dbus'''
proxy = DbusProxy()
return proxy
if __name__ == '__main__':
print init_dbus()
| gpl-3.0 | -3,449,178,230,094,685,700 | 32.295775 | 181 | 0.677242 | false | 3.7824 | false | false | false |
Caleb-Shepard/Scripts | Scripts_General/tulz/xml_to_json.py | 1 | 9544 | # Script to recurse through all subdirectories
from pandas.io.json import json_normalize
import flatten_json
import xmltodict
import argparse
import zipfile
import shutil
import pandas
import tqdm
import json
import os
import re
''' ------------------------------ CONSTANTS ------------------------------ '''
JSON_WORKBOOKS_ARRAY_OPENER = '{"jsonified-workbooks":['
JSON_WORKBOOKS_ARRAY_CLOSER = ']}'
JSON_WORKBOOKS_ARRAY_DELIMITER = ','
STARTING_DIRECTORY = '/'
EXTRACT_NAME = 'extracted_workbook'
SECURITY_WARNING = r"""
SECURITY WARNING
This program relies on creating a temporary directory to extract Tableau
archives. Be sure that only appropriate users have access to tmp directories.
On Windows systems, the system-wide default tmp directory is located at
"C:\Windows\Temp", and for an individual user, the tmp directory is located at
"C:\Users\<username>\AppData\Local\Temp".
The default tmp directory on a *nix system is located at /tmp.
To prevent inappropriate access, local tmp directories are used. You may want to
customize the tmp location used depending on your policy or needs.
"""
'''
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer for the accumulator')
parser.add_argument('--tmpdir', dest='accumulate', action='store_const', const=sum, default=max, help='sum the integers (default: find the max)')
args = parser.parse_args()
'''
''' -------------------------- RUNTIME CONSTANTS -------------------------- '''
FOLDER_PATH = 'Users' + os.sep + os.getenv('username') + os.sep + 'Documents' + os.sep + 'Desktop' + os.sep + 'demo_folder'
DISK_PATH = "C:" + os.sep
STARTING_DIRECTORY = os.path.join(os.sep, DISK_PATH, FOLDER_PATH)
OUTPUT_DIRECTORY = os.path.join(os.sep, DISK_PATH, FOLDER_PATH)
TMP_DIRECTORY = os.path.join(os.sep, DISK_PATH, FOLDER_PATH)
REMOVE_IMAGE_RAW_BITS = True # This needs to come from some sort of command line argument
REMOVE_CUSTOM_SQL = False # This needs to come from some sort of command line argument
def unzip(zip_archive, destination):
print('suggested destination: ' + destination)
with zipfile.ZipFile(zip_archive, "r") as zip_archive:
zip_archive.extractall(destination)
# we don't want to merge python dictionaries, we want to add each dictionary to
# a larger dictionary
def get_all_workbooks(starting_directory, unzip_tmp_directory):
print('current dir: ' + starting_directory)
tableau_workbooks_xml = []
for item in os.listdir(starting_directory):
if item.endswith('.twb'): # straightforward xml extraction
tableau_workbooks_xml.append(
get_workbook_xml(starting_directory + '\\' + item)
)
elif item.endswith('.twbx'): # extract archive, extract xml
extract_destination = unzip_tmp_directory + '/' + EXTRACT_NAME
archive_directory = unzip(starting_directory + '\\' + item, extract_destination)
archive_workbook_xml = get_all_workbooks(extract_destination, unzip_tmp_directory)
if type(archive_workbook_xml) is list:
for xml_string in archive_workbook_xml:
if type(xml_string) is not str:
print('Unexpected type!')
else:
tableau_workbooks_xml.append(xml_string)
elif type(archive_workbook_xml) is str:
                # extend() would add the string character by character; append the whole XML string instead
                tableau_workbooks_xml.append(archive_workbook_xml)
else:
print('Unexpected type! Error appending XML data to tableau_workbooks_xml')
exit()
# Remove your unzipped archive
shutil.rmtree(extract_destination)
elif os.path.isdir(starting_directory + '\\' + item): # recurse over subdirectory
tableau_workbooks_xml.extend(get_all_workbooks(starting_directory + '\\' + item, unzip_tmp_directory))
return tableau_workbooks_xml
def get_workbook_xml(path):
xml_contents = ''
print('Path = ' + path)
with open(path, "r") as file_stream:
xml_contents = file_stream.read()
return xml_contents
def list_of_xml_to_list(xml_list):
json_list = []
print('XML_LIST type: ' + str(type(xml_list)))
for xml in xml_list:
if type(xml) is list:
for xml_string in xml:
json_list.append(xmltodict.parse(xml_string))
        else:
            print('XML type: ' + str(type(xml)))
            print('XML type: ' + str(xml))
            # xmltodict.parse() expects a single XML string, not a list
            json_list.append(xmltodict.parse(xml))
return json_list
# Given the state of a flag, start cleaning process
def cleanse(json_summary):
json_summary = json_summary # not sure if this needs to be declared locally, this might be removable
# Remove the raw image content, not information about the images
if REMOVE_IMAGE_RAW_BITS:
# workbook.thumbnails.thumbnail[].#text
try:
for element in json_summary['workbook']['thumbnails']['thumbnail']:
                # If this is not an element, it will be interpreted as a string which will crash the method and program
if type(element) is not str:
del element['#text']
try:
# Case where there is only one image in the workbook, not multiple
del json_summary['workbook']['thumbnails']['thumbnail']['#text']
except KeyError as key_error:
pass
except TypeError as type_error:
# happens when slicing a list, depends on what's in the attribute
pass
except KeyError as key_error:
pass
# workbook.external.shapes.shape[].#text
try:
for element in json_summary['workbook']['external']['shapes']['shape']:
                # If this is not an element, it will be interpreted as a string which will crash the method and program
if type(element) is not str:
del element['#text']
try:
# Case where there is only one image in the workbook, not multiple
del json_summary['workbook']['external']['shapes']['shape']['#text']
except KeyError as key_error:
pass
except TypeError as type_error:
# happens when slicing a list, depends on what's in the attribute
pass
except KeyError as key_error:
pass
# Remove the raw SQL information, not the individual formulas that are used throughout the report
if REMOVE_CUSTOM_SQL:
# connection.metadata-records.metadata-record[].attributes.attribute[].#text
try:
for data_source in json_summary['workbook']['datasources']['datasource']:
# If this is not an element, it will be interpreted as a string which will crash the method and program
for metadata_record in data_source['connection']['metadata-records']['metadata-record']:
for attribute in metadata_record['attributes']['attribute']:
if type(attribute) is not str:
del attribute['#text']
except KeyError as key_error:
print('Recovered from key error when attempting to remove custom sql.')
print(key_error)
return json_summary
def get_windows_username():
    return os.getlogin()
def get_nix_username():
    return os.popen('whoami').read().strip()  # strip the trailing newline from whoami
def make_tmp_directory(tmp_directory_location):
if os.path.exists(tmp_directory_location):
# don't make dir
pass
else:
        os.makedirs(tmp_directory_location)
def clear_tmp_directory():
pass
def get_workbook_name():
# return $(FILE_NAME or PATH?)
pass
def detect_custom_sql():
# if datasources.datasource[].connection.@class == ("sqlproxy" or "postgres")
# however, still not indicative of custom SQL; are the XML queries you removed indicative?
pass
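# Hedged sketch (not part of the original script): one way detect_custom_sql()
# could walk the parsed workbook, following the comment above. The dictionary
# keys are assumptions based on the paths already used in cleanse().
def _detect_custom_sql(json_summary):
    try:
        sources = json_summary['workbook']['datasources']['datasource']
        if isinstance(sources, dict):  # a single datasource is not wrapped in a list
            sources = [sources]
        return any(source.get('connection', {}).get('@class') in ('sqlproxy', 'postgres')
                   for source in sources if not isinstance(source, str))
    except KeyError:
        return False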
def main():
os.system('cls')
input('Defaulting to starting directory: ' + STARTING_DIRECTORY)
input('Defaulting to output directory: ' + OUTPUT_DIRECTORY)
input('Defaulting to tmp directory: ' + TMP_DIRECTORY)
input('Remove image raw bits (.#text): ' + str(REMOVE_IMAGE_RAW_BITS))
input('Remove custom SQL: ' + str(REMOVE_CUSTOM_SQL))
workbook_xml_list = get_all_workbooks(STARTING_DIRECTORY, TMP_DIRECTORY)
output_file_path = OUTPUT_DIRECTORY + os.sep + 'json_output.json'
json_list = list_of_xml_to_list(workbook_xml_list)
try: # file I/O writing output and removing a trailing character
# Write output
with open(output_file_path, 'w') as output_file:
output_file.write(JSON_WORKBOOKS_ARRAY_OPENER)
with open(output_file_path, 'a') as output_file:
for json_summary in json_list:
string = json.dumps(cleanse(json_summary))
output_file.write(string + JSON_WORKBOOKS_ARRAY_DELIMITER)
# remove trailing '},' that was generated at the end of the loop
with open(output_file_path, 'rb+') as filehandle:
filehandle.seek(-1, os.SEEK_END)
filehandle.truncate()
with open(output_file_path, 'a') as output_file:
output_file.write(JSON_WORKBOOKS_ARRAY_CLOSER)
except IOError as e:
        print('Failed to open or write JSON output. ' + str(e))
if __name__ == "__main__":
main()
| gpl-2.0 | -1,757,348,847,603,331,000 | 39.961373 | 145 | 0.621542 | false | 4.103181 | false | false | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/ascend_group.py | 2 | 5388 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import paddle.fluid as fluid
from paddle.fluid import unique_name
import paddle.fluid.core as core
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_optimizers.ascend import ascend_parser, ascend_optimizer
from collections import namedtuple
Block = namedtuple('Block', ['program'])
Loss = namedtuple('Loss', ['block'])
paddle.enable_static()
OpRole = core.op_proto_and_checker_maker.OpRole
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
OP_ROLE_VAR_KEY = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
role = fleet.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
def init_communicator(startup_program, main_program, current_endpoint,
endpoints, ring_id):
nranks = len(endpoints)
other_endpoints = endpoints[:]
other_endpoints.remove(current_endpoint)
group_rank = endpoints.index(current_endpoint)
assert group_rank >= 0
block = startup_program.global_block()
nccl_id_var = block.create_var(
name=unique_name.generate('nccl_id'),
persistable=True,
type=core.VarDesc.VarType.RAW)
block.append_op(
type='c_gen_nccl_id',
inputs={},
outputs={'Out': nccl_id_var},
attrs={
'rank': group_rank,
'endpoint': current_endpoint,
'other_endpoints': other_endpoints,
OP_ROLE_KEY: OpRole.Forward,
})
block.append_op(
type='c_comm_init',
inputs={'X': nccl_id_var},
outputs={},
attrs={
'nranks': nranks,
'rank': group_rank,
'ring_id': ring_id,
OP_ROLE_KEY: OpRole.Forward,
})
# add input op for test
fill_var_name = "tensor@Filled"
fill_var = block.create_var(
name=fill_var_name,
shape=[10, 10],
dtype='float32',
persistable=False,
stop_gradient=True)
block.append_op(
type="fill_constant",
outputs={"Out": fill_var_name},
attrs={
"shape": [10, 10],
"dtype": fill_var.dtype,
"value": 1.0,
"place_type": 1
})
with fluid.program_guard(main_program):
op_type = "c_allreduce_sum"
data = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.5)
helper = LayerHelper(op_type, **locals())
helper.append_op(
type=op_type,
inputs={'X': [data]},
outputs={'Out': [data]},
attrs={'ring_id': ring_id,
'use_calc_stream': True})
print("startup program:", startup_program)
print("main program:", main_program)
def train(world_endpoints, world_device_ids, local_device_ids, local_rank):
startup_programs = []
main_programs = []
#trainer_endpoints=["127.0.0.1:6071","127.0.0.1:6072","127.0.0.1:6073","127.0.0.1:6074"]
trainer_endpoints = world_endpoints
groups = [[], [], []]
groups[0] = [trainer_endpoints[0], trainer_endpoints[1]]
groups[1] = [trainer_endpoints[2], trainer_endpoints[3]]
groups[2] = [trainer_endpoints[0], trainer_endpoints[2]]
print("groups:", groups)
for i in range(len(trainer_endpoints)):
startup_programs.append(fluid.Program())
main_programs.append(fluid.Program())
for idx, group in enumerate(groups):
for te in group:
te_idx = trainer_endpoints.index(te)
startup_program = startup_programs[te_idx]
main_program = main_programs[te_idx]
init_communicator(startup_program, main_program, te, group, idx)
print(len(startup_programs))
print(startup_programs[local_rank])
print(main_programs[local_rank])
print("local rank: ", local_rank)
print("local startup program: ", startup_programs[local_rank])
startup_program = startup_programs[local_rank]
main_program = main_programs[local_rank]
loss = Loss(Block(main_program))
optimizer = ascend_optimizer.AscendOptimizer(None, fetch_list=[])
optimizer.minimize(
loss,
startup_program,
auto_dp=True,
rank_table_file=os.getenv("RANK_TABLE_FILE", None))
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_program)
exe.run(main_program)
worker_endpoints = fleet.worker_endpoints()
world_device_ids = fleet.world_device_ids()
local_device_ids = fleet.local_device_ids()
local_rank = int(fleet.local_rank())
print("worker_endpoints:", worker_endpoints)
print("world_device_ids:", world_device_ids)
print("local_device_ids:", local_device_ids)
print("local_rank:", local_rank)
train(worker_endpoints, world_device_ids, local_device_ids, local_rank)
| apache-2.0 | -1,859,342,429,792,507,600 | 32.259259 | 92 | 0.645323 | false | 3.523872 | false | false | false |
metajack/skia | third_party/harfbuzz/contrib/tables/mirroring-parse.py | 53 | 1658 | import sys
# http://www.unicode.org/Public/UNIDATA/auxiliary/BidiMirroring.txt
# This parses a file in the format of the above file and outputs a table
# suitable for bsearch(3). This table maps Unicode code points to their
# 'mirror'. (Mirroring is used when rendering RTL characters, see the Unicode
# standard). By convention, this mapping should be commutative, but this code
# doesn't enforce or check this.
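# A typical data line in BidiMirroring.txt looks like (illustrative):
#   0028; 0029 # LEFT PARENTHESIS
# which main() below parses into the pair (0x0028, 0x0029).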
def main(infile, outfile):
pairs = []
for line in infile:
line = line[:-1]
if len(line) == 0 or line[0] == '#':
continue
if '#' in line:
(data, _) = line.split('#', 1)
else:
data = line
if ';' not in data:
continue
(a, b) = data.split(';', 1)
a = int(a, 16)
b = int(b, 16)
pairs.append((a, b))
pairs.sort()
print >>outfile, '// Generated from Unicode Bidi Mirroring tables\n'
print >>outfile, '#ifndef MIRRORING_PROPERTY_H_'
print >>outfile, '#define MIRRORING_PROPERTY_H_\n'
print >>outfile, '#include <stdint.h>'
print >>outfile, 'struct mirroring_property {'
print >>outfile, ' uint32_t a;'
print >>outfile, ' uint32_t b;'
print >>outfile, '};\n'
print >>outfile, 'static const struct mirroring_property mirroring_properties[] = {'
for pair in pairs:
print >>outfile, ' {0x%x, 0x%x},' % pair
print >>outfile, '};\n'
print >>outfile, 'static const unsigned mirroring_properties_count = %d;\n' % len(pairs)
print >>outfile, '#endif // MIRRORING_PROPERTY_H_'
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'Usage: %s <input .txt> <output .h>' % sys.argv[0]
else:
main(file(sys.argv[1], 'r'), file(sys.argv[2], 'w+'))
| bsd-3-clause | -7,345,047,434,276,346,000 | 32.16 | 90 | 0.627262 | false | 3.176245 | false | false | false |
ardoi/datajuicer | lsjuicer/ui/windows/start.py | 1 | 3101 | from PyQt5 import QtGui as QG
from PyQt5 import QtWidgets as QW
from PyQt5 import QtCore as QC
from constants import Constants
class StartUI(QW.QDialog):
mode_set = QC.pyqtSignal(int)
"""Main user interface window"""
def __init__(self, parent = None):
super(StartUI, self).__init__(parent)
self.setup_ui()
def setup_ui(self):
self.modeasker = QW.QWidget()
modelayout = QW.QVBoxLayout()
self.modeasker.setLayout(modelayout)
modelayout.addWidget(QW.QLabel('Choose mode:'))
buttonlayout = QW.QHBoxLayout()
self.sparkpix_g = QG.QPixmap(":/sparkbutton_gray.png")
self.sparkicon = QG.QIcon(self.sparkpix_g)
self.sparkpix = QG.QPixmap(":/sparkbutton.png")
self.transientpix_g = QG.QPixmap(":/transientbutton_gray.png")
self.transienticon = QG.QIcon(self.transientpix_g)
self.transientpix = QG.QPixmap(":/transientbutton.png")
self.sparkb = QW.QPushButton(self.sparkicon,'')
self.sparkb.setCheckable(True)
self.sparkb.setIconSize(QC.QSize(140, 140))
self.sparkb.setSizePolicy(QW.QSizePolicy.Expanding,
QW.QSizePolicy.Expanding)
self.transientb = QW.QPushButton(self.transienticon,'')
self.transientb.setCheckable(True)
self.transientb.setMouseTracking(True)
self.transientb.setIconSize(QC.QSize(140, 140))
self.transientb.setSizePolicy(QW.QSizePolicy.Expanding,
QW.QSizePolicy.Expanding)
buttonlayout.addWidget(self.sparkb)
buttonlayout.addWidget(self.transientb)
modelayout.addLayout(buttonlayout)
self.gobutton = QW.QPushButton('OK')
self.gobutton.setEnabled(False)
modelayout.addWidget(self.gobutton)
self.setLayout(QW.QVBoxLayout())
self.layout().addWidget(self.modeasker)
#self.setCentralWidget(self.modeasker)
onsc = lambda : self.setbuttons(0)
ontc = lambda : self.setbuttons(1)
self.sparkb.clicked[()].connect(onsc)
self.transientb.clicked[()].connect(ontc)
self.gobutton.clicked[()].connect(self.go)
#self.setWindowFlags(QC.Qt.Dialog)
def go(self):
if self.sparkb.isChecked():
self.mode_set.emit(Constants.SPARK_TYPE)
else:
self.mode_set.emit(Constants.TRANSIENT_TYPE)
#self.close()
return QW.QDialog.accept(self)
def setbuttons(self, state):
if not self.gobutton.isEnabled():
self.gobutton.setEnabled(True)
if state == 0:
self.sparkb.setChecked(True)
self.transientb.setChecked(False)
self.sparkicon = QG.QIcon(self.sparkpix)
self.transienticon = QG.QIcon(self.transientpix_g)
elif state == 1:
self.transientb.setChecked(True)
self.sparkb.setChecked(False)
self.sparkicon = QG.QIcon(self.sparkpix_g)
self.transienticon = QG.QIcon(self.transientpix)
self.sparkb.setIcon(self.sparkicon)
self.transientb.setIcon(self.transienticon)
| gpl-3.0 | 4,742,097,940,841,553,000 | 35.916667 | 70 | 0.643341 | false | 3.5 | false | false | false |
openstack/octavia | octavia/tests/unit/amphorae/backends/utils/test_haproxy_query.py | 1 | 6881 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from unittest import mock
from octavia.amphorae.backends.utils import haproxy_query as query
from octavia.common import constants
from octavia.common import utils as octavia_utils
import octavia.tests.unit.base as base
STATS_SOCKET_SAMPLE = (
"# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,"
"econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,"
"downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,"
"rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp"
"_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot"
",cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,"
"last_agt,qtime,ctime,rtime,ttime,\n"
"http-servers:listener-id,id-34821,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0,"
"1,1,575,575,,1,3,1,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,,"
",0,0,0,0,\n"
"http-servers:listener-id,id-34824,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0,"
"1,1,567,567,,1,3,2,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,,"
",0,0,0,0,\n"
"http-servers:listener-id,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,DOWN,0,0,"
"0,,1,567,567,,1,3,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,0,0,0,0,-1,,,0,0,0,"
"0,\n"
"tcp-servers:listener-id,id-34833,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,1,1,"
"560,560,,1,5,1,,0,,2,0,,0,L4TOUT,,30000,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0,"
"\n"
"tcp-servers:listener-id,id-34836,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,1,1,"
"552,552,,1,5,2,,0,,2,0,,0,L4TOUT,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0,"
"\n"
"tcp-servers:listener-id,id-34839,0,0,0,0,,0,0,0,,0,,0,0,0,0,DRAIN,0,1,0,"
"0,0,552,0,,1,5,2,,0,,2,0,,0,L7OK,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0,"
"\n"
"tcp-servers:listener-id,id-34842,0,0,0,0,,0,0,0,,0,,0,0,0,0,MAINT,0,1,0,"
"0,0,552,0,,1,5,2,,0,,2,0,,0,L7OK,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0,"
"\n"
"tcp-servers:listener-id,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,UP,0,0,0,,"
"1,552,552,,1,5,0,,0,,1,0,,0,,,,,,,,,,,,,,0,0,0,0,0,0,-1,,,0,0,0,0,"
)
INFO_SOCKET_SAMPLE = (
'Name: HAProxy\nVersion: 1.5.3\nRelease_date: 2014/07/25\nNbproc: 1\n'
'Process_num: 1\nPid: 2238\nUptime: 0d 2h22m17s\nUptime_sec: 8537\n'
'Memmax_MB: 0\nUlimit-n: 4031\nMaxsock: 4031\nMaxconn: 2000\n'
'Hard_maxconn: 2000\nCurrConns: 0\nCumConns: 32\nCumReq: 32\n'
'MaxSslConns: 0\nCurrSslConns: 0\nCumSslConns: 0\nMaxpipes: 0\n'
'PipesUsed: 0\nPipesFree: 0\nConnRate: 0\nConnRateLimit: 0\n'
'MaxConnRate: 0\nSessRate: 0\nSessRateLimit: 0\nMaxSessRate: 0\n'
'SslRate:0\nSslRateLimit: 0\nMaxSslRate: 0\nSslFrontendKeyRate: 0\n'
'SslFrontendMaxKeyRate: 0\nSslFrontendSessionReuse_pct: 0\n'
'SslBackendKeyRate: 0\nSslBackendMaxKeyRate: 0\nSslCacheLookups: 0\n'
'SslCacheMisses: 0\nCompressBpsIn: 0\nCompressBpsOut: 0\n'
'CompressBpsRateLim: 0\nZlibMemUsage: 0\nMaxZlibMemUsage: 0\nTasks: 4\n'
'Run_queue: 1\nIdle_pct: 100\nnode: amphora-abd35de5-e377-49c5-be32\n'
'description:'
)
class QueryTestCase(base.TestCase):
def setUp(self):
self.q = query.HAProxyQuery('')
super().setUp()
@mock.patch('socket.socket')
def test_query(self, mock_socket):
sock = mock.MagicMock()
sock.connect.side_effect = [None, socket.error]
sock.recv.side_effect = ['testdata', None]
mock_socket.return_value = sock
self.q._query('test')
sock.connect.assert_called_once_with('')
sock.send.assert_called_once_with(octavia_utils.b('test\n'))
sock.recv.assert_called_with(1024)
self.assertTrue(sock.close.called)
self.assertRaisesRegex(Exception,
'HAProxy \'test\' query failed.',
self.q._query, 'test')
def test_get_pool_status(self):
query_mock = mock.Mock()
self.q._query = query_mock
query_mock.return_value = STATS_SOCKET_SAMPLE
self.assertEqual(
{'tcp-servers:listener-id': {
'status': constants.UP,
'listener_uuid': 'listener-id',
'pool_uuid': 'tcp-servers',
'members':
{'id-34833': constants.UP,
'id-34836': constants.UP,
'id-34839': constants.DRAIN,
'id-34842': constants.MAINT}},
'http-servers:listener-id': {
'status': constants.DOWN,
'listener_uuid': 'listener-id',
'pool_uuid': 'http-servers',
'members':
{'id-34821': constants.DOWN,
'id-34824': constants.DOWN}}},
self.q.get_pool_status()
)
def test_show_info(self):
query_mock = mock.Mock()
self.q._query = query_mock
query_mock.return_value = INFO_SOCKET_SAMPLE
self.assertEqual(
{'SslRateLimit': '0', 'SessRateLimit': '0', 'Version': '1.5.3',
'Hard_maxconn': '2000', 'Ulimit-n': '4031', 'PipesFree': '0',
'SslRate': '0', 'ZlibMemUsage': '0', 'CumConns': '32',
'ConnRate': '0', 'Memmax_MB': '0', 'CompressBpsOut': '0',
'MaxConnRate': '0', 'Uptime_sec': '8537', 'SslCacheMisses': '0',
'MaxZlibMemUsage': '0', 'SslCacheLookups': '0',
'CurrSslConns': '0', 'SslBackendKeyRate': '0',
'CompressBpsRateLim': '0', 'Run_queue': '1', 'CumReq': '32',
'SslBackendMaxKeyRate': '0', 'SslFrontendSessionReuse_pct': '0',
'Nbproc': '1', 'Tasks': '4', 'Maxpipes': '0', 'Maxconn': '2000',
'Pid': '2238', 'Maxsock': '4031', 'CurrConns': '0',
'Idle_pct': '100', 'CompressBpsIn': '0',
'SslFrontendKeyRate': '0', 'MaxSessRate': '0', 'Process_num': '1',
'Uptime': '0d 2h22m17s', 'PipesUsed': '0', 'SessRate': '0',
'MaxSslRate': '0', 'ConnRateLimit': '0', 'CumSslConns': '0',
'Name': 'HAProxy', 'SslFrontendMaxKeyRate': '0',
'MaxSslConns': '0', 'node': 'amphora-abd35de5-e377-49c5-be32',
'description': '', 'Release_date': '2014/07/25'},
self.q.show_info()
)
| apache-2.0 | -5,764,914,478,178,473,000 | 46.455172 | 79 | 0.58102 | false | 2.543808 | true | false | false |
riccardo1991/virtual-device | app/models.py | 1 | 4555 | import numpy
import scipy.stats
# Sensor class represents a sensor connected to the virtual device.
class Sensor:
def __init__(self, sensor_id, name, sensor_type, minimum, maximum, sampling_rate, distribution):
self._id = sensor_id
self._name = name
self._type = sensor_type
self._minimum = minimum
self._maximum = maximum
self._sampling_rate = sampling_rate
self._distribution = distribution
self._status = False
self._value = 0
# Overload of the print of a sensor object.
def __str__(self):
return "" + str(self._id) + " " + self._name + " " + str(self._status)
# ID getter.
@property
def id(self):
return self._id
# Name getter.
@property
def name(self):
return self._name
# Name setter.
@name.setter
def name(self, name):
self._name = name
# Type getter.
@property
def type(self):
return self._type
# Type setter.
@type.setter
def type(self, type):
self._type = type
# Minimum getter.
@property
def minimum(self):
return self._minimum
# Minimum setter.
@minimum.setter
def minimum(self, minimum):
self._minimum = minimum
# Maximum getter.
@property
def maximum(self):
return self._maximum
# Maximum setter.
@maximum.setter
def maximum(self, maximum):
self._maximum = maximum
# Sampling rate getter.
@property
def sampling_rate(self):
return self._sampling_rate
# Sampling rate setter.
@sampling_rate.setter
def sampling_rate(self, sampling_rate):
self._sampling_rate = sampling_rate
# Distribution getter.
@property
def distribution(self):
return self._distribution
# Distribution setter.
@distribution.setter
def distribution(self, distribution):
self._distribution = distribution
# Value getter.
@property
def value(self):
return self._value
# Function for generate a random integer value following a normal distribution.
def normalDistributionInteger(self):
x = numpy.arange(self._minimum, self._maximum + 1)
sigma = numpy.std(x)
xU, xL = x + 0.5, x - 0.5
prob = scipy.stats.norm.cdf(xU, scale=3*sigma) - scipy.stats.norm.cdf(xL, scale=3*sigma)
prob = prob / prob.sum() # normalize the probabilities so their sum is 1
self._value = numpy.random.choice(x, p=prob)
# Function for generate a float integer value following a normal distribution.
def normalDistributionFloat(self):
x = numpy.linspace(round(self._minimum, 2), round(self._maximum + 1, 2))
sigma = numpy.std(x)
xU, xL = x + 0.50, x - 0.50
prob = scipy.stats.norm.cdf(xU, scale=3*sigma) - scipy.stats.norm.cdf(xL, scale=3*sigma)
prob = prob / prob.sum() # normalize the probabilities so their sum is 1
self._value = round(numpy.random.choice(x, p=prob), 2)
# Function for generate a random integer value following a uniform distribution.
def uniformDistributionInteger(self):
self._value = numpy.random.randint(self._minimum, self._maximum + 1)
# Function for generate a random float value following a normal distribution.
def uniformDistributionFloat(self):
x = numpy.random.uniform(self.minimum, self._maximum + 1)
# Round the generated number with 2 decimals.
self._value = round(x, 2)
# Activate the sensor.
def activate(self):
self._status = True
# Deactivate the sensor.
def deactivate(self):
self._status = False
# Return the status of the sensor.
def isActive(self):
return self._status
# Assign a random value to the value attribute depending from the distribution and type.
def randomValue(self):
if self._status:
if (self._distribution == 'normal') and (self._type == 'integer'):
self.normalDistributionInteger()
elif (self._distribution == 'normal') and (self._type == 'float'):
self.normalDistributionFloat()
elif (self._distribution == 'uniform') and (self._type == 'integer'):
self.uniformDistributionInteger()
elif (self._distribution == 'uniform') and (self._type == 'float'):
self.uniformDistributionFloat()
else:
print("Device", self._id, "is not active, please activate it before")
| mit | -722,739,101,430,308,000 | 30.631944 | 100 | 0.612733 | false | 4.186581 | false | false | false |
grow/grow-ext-build-server | grow_build_server/google_sheets.py | 1 | 5864 | from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from googleapiclient import errors
from googleapiclient import discovery
from oauth2client.contrib import appengine
import cgi
import csv
import httplib2
import io
import logging
import time
import random
import os
RELOAD_ACL_QUERY_PARAM = 'grow-reload-acl'
SCOPE = 'https://www.googleapis.com/auth/drive'
EDIT_URL = 'https://docs.google.com/spreadsheets/d/{}'
RETRY_ERRORS = [
'backendError',
'internalServerError',
'quotaExceeded',
'userRateLimitExceeded',
]
discovery.logger.setLevel(logging.WARNING)
urlfetch.set_default_fetch_deadline(60)
class Error(Exception):
pass
class Settings(ndb.Model):
sheet_id = ndb.StringProperty()
sheet_gid_global = ndb.StringProperty()
sheet_gid_admins = ndb.StringProperty()
@classmethod
def instance(cls):
key = ndb.Key(cls.__name__, 'Settings')
ent = key.get()
if ent is None:
ent = cls(key=key)
ent.put()
logging.info('Created settings -> {}'.format(key))
return ent
def get_query_dict():
query_string = os.getenv('QUERY_STRING', '')
return cgi.parse_qs(query_string, keep_blank_values=True)
def create_service(api='drive', version='v2'):
credentials = appengine.AppAssertionCredentials(SCOPE)
http = httplib2.Http()
http = credentials.authorize(http)
return discovery.build(api, version, http=http)
def _request_with_backoff(service, url):
for n in range(0, 5):
resp, content = service._http.request(url)
if resp.status in [429]:
logging.info('Attempt {} for {}'.format(n, url))
logging.info(resp)
time.sleep((2 ** (n + 1)) + random.random())
continue
return resp, content
raise Error('Error {} {} downloading sheet: {}'.format(resp.status, resp.reason, url))
def _request_sheet_content(sheet_id, gid=None):
service = create_service()
logging.info('Loading ACL -> {}'.format(sheet_id))
for n in range(0, 5):
try:
resp = service.files().get(fileId=sheet_id).execute()
except errors.HttpError as error:
if error.resp.reason in RETRY_ERRORS:
                logging.info('Attempt {} for {}'.format(n, sheet_id))
time.sleep((2 ** (n + 1)) + random.random())
continue
raise
if 'exportLinks' not in resp:
raise Error('Nothing to export: {}'.format(sheet_id))
for mimetype, url in resp['exportLinks'].iteritems():
if not mimetype.endswith('csv'):
continue
if gid is not None:
url += '&gid={}'.format(gid)
resp, content = _request_with_backoff(service, url)
if resp.status != 200:
text = 'Error {} downloading sheet: {}:{}'
text = text.format(resp.status, sheet_id, gid)
raise Error(text)
return content
def get_sheet(sheet_id, gid=None, use_cache=True):
"""Returns a list of rows from a sheet."""
query_dict = get_query_dict()
force_cache = RELOAD_ACL_QUERY_PARAM in query_dict
cache_key = 'google_sheet:{}:{}'.format(sheet_id, gid)
logging.info('Loading Google Sheet -> {}'.format(cache_key))
result = memcache.get(cache_key)
if result is None or force_cache or not use_cache:
content = _request_sheet_content(sheet_id, gid=gid)
fp = io.BytesIO()
fp.write(content)
fp.seek(0)
reader = csv.DictReader(fp)
result = [row for row in reader]
logging.info('Saving Google Sheet in cache -> {}'.format(cache_key))
memcache.set(cache_key, result)
return result
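# Hedged usage sketch (not part of the original module): fetch a worksheet as a
# list of dict rows keyed by the header row. The sheet id and gid are placeholders.
def _example_get_sheet():
    rows = get_sheet('your-spreadsheet-id', gid='0')
    for row in rows:
        logging.info('row -> {}'.format(row))
    return rows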
def append_rows(sheet_id, gid, rows_to_append):
rows = []
for row in rows_to_append:
values = []
for item in row:
values.append({
'userEnteredValue': {
'stringValue': item,
},
})
rows.append({'values': values})
service = create_service(api='sheets', version='v4')
requests = []
requests.append({
'appendCells': {
'fields': 'userEnteredValue',
'rows': rows,
'sheetId': gid,
},
})
body = {'requests': requests}
resp = service.spreadsheets().batchUpdate(
spreadsheetId=sheet_id, body=body).execute()
def get_spreadsheet_url(sheet_id, gid=None):
url = 'https://docs.google.com/spreadsheets/d/{}'.format(sheet_id)
if gid:
url += '#gid={}'.format(gid)
return url
def create_sheet(title='Untitled Grow Website Access'):
service = create_service()
data = {
'title' : title,
'mimeType' : 'application/vnd.google-apps.spreadsheet'
}
resp = service.files().insert(body=data, fields='id').execute()
logging.info('Created sheet -> {}'.format(resp['id']))
return resp['id']
def share_sheet(file_id, emails):
service = create_service()
for email in emails:
permission = {
'type': 'user',
'role': 'writer',
'value': email,
}
service.permissions().insert(
fileId=file_id,
body=permission,
fields='id',
).execute()
logging.info('Shared sheet -> {}'.format(email))
def get_or_create_sheet_from_settings(title=None, emails=None):
settings = Settings.instance()
if settings.sheet_id is None:
if title:
title = '{} Website Access'.format(title)
sheet_id = create_sheet(title=title)
share_sheet(sheet_id, emails)
settings.sheet_id = sheet_id
settings.put()
sheet_id = settings.sheet_id
sheet_gid_global = settings.sheet_gid_global
resp = get_sheet(sheet_id, gid=sheet_gid_global)
return resp
| mit | -2,237,736,920,941,344,300 | 29.541667 | 90 | 0.601467 | false | 3.795469 | false | false | false |
gmichaeljaison/cv-utils | cv_utils/template_matching.py | 1 | 5887 | from __future__ import division
import numpy as np
import cv2 as cv
import scipy.spatial
from cv_utils import Box, img_utils, feature_extractor as fe
_DEF_TM_OPT = dict(feature='rgb',
distance='correlation',
normalize=True,
retain_size=True)
def match_one(template, image, options=None):
"""
Match template and find exactly one match in the Image using specified features.
:param template: Template Image
:param image: Search Image
:param options: Options include
- features: List of options for each feature
:return: (Box, Score) Bounding box of the matched object, Heatmap value
"""
heatmap, scale = multi_feat_match(template, image, options)
min_val, _, min_loc, _ = cv.minMaxLoc(heatmap)
top_left = tuple(scale * x for x in min_loc)
score = min_val
h, w = template.shape[:2]
return Box(top_left[0], top_left[1], w, h), score
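# Hedged usage sketch (not part of the original module): matching a template in a
# scene image with explicit options. The file names and option values are
# illustrative only.
def _example_match_one():
    template = cv.imread('template.png')
    image = cv.imread('scene.png')
    options = {'feature': 'gray', 'distance': 'correlation'}
    box, score = match_one(template, image, options)
    return box, score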
def multi_feat_match(template, image, options=None):
"""
Match template and image by extracting multiple features (specified) from it.
:param template: Template image
:param image: Search image
:param options: Options include
- features: List of options for each feature
:return:
"""
h, w = image.shape[:2]
scale = 1
if options is not None and 'features' in options:
heatmap = np.zeros((h, w), dtype=np.float64)
for foptions in options['features']:
f_hmap, _ = feature_match(template, image, foptions)
heatmap += cv.resize(f_hmap, (w, h), interpolation=cv.INTER_AREA)
heatmap /= len(options['features'])
else:
heatmap, scale = feature_match(template, image, options)
return heatmap, scale
def feature_match(template, image, options=None):
"""
Match template and image by extracting specified feature
:param template: Template image
:param image: Search image
:param options: Options include
- feature: Feature extractor to use. Default is 'rgb'. Available options are:
'hog', 'lab', 'rgb', 'gray'
:return: Heatmap
"""
op = _DEF_TM_OPT.copy()
if options is not None:
op.update(options)
feat = fe.factory(op['feature'])
tmpl_f = feat(template, op)
img_f = feat(image, op)
scale = image.shape[0] / img_f.shape[0]
heatmap = match_template(tmpl_f, img_f, op)
return heatmap, scale
def match_template(template, image, options=None):
"""
Multi channel template matching using simple correlation distance
:param template: Template image
:param image: Search image
:param options: Other options:
- distance: Distance measure to use. Default: 'correlation'
- normalize: Heatmap values will be in the range of 0 to 1. Default: True
- retain_size: Whether to retain the same size as input image. Default: True
:return: Heatmap
"""
# If the input has max of 3 channels, use the faster OpenCV matching
    if image.ndim < 3 or (image.ndim == 3 and image.shape[2] <= 3):
return match_template_opencv(template, image, options)
op = _DEF_TM_OPT.copy()
if options is not None:
op.update(options)
template = img_utils.gray3(template)
image = img_utils.gray3(image)
h, w, d = template.shape
im_h, im_w = image.shape[:2]
template_v = template.flatten()
heatmap = np.zeros((im_h - h, im_w - w))
for col in range(0, im_w - w):
for row in range(0, im_h - h):
cropped_im = image[row:row + h, col:col + w, :]
cropped_v = cropped_im.flatten()
if op['distance'] == 'euclidean':
heatmap[row, col] = scipy.spatial.distance.euclidean(template_v, cropped_v)
elif op['distance'] == 'correlation':
heatmap[row, col] = scipy.spatial.distance.correlation(template_v, cropped_v)
# normalize
if op['normalize']:
heatmap /= heatmap.max()
# size
if op['retain_size']:
hmap = np.ones(image.shape[:2]) * heatmap.max()
h, w = heatmap.shape
hmap[:h, :w] = heatmap
heatmap = hmap
return heatmap
def match_template_opencv(template, image, options):
"""
Match template using OpenCV template matching implementation.
Limited by number of channels as maximum of 3.
Suitable for direct RGB or Gray-scale matching
:param options: Other options:
- distance: Distance measure to use. (euclidean | correlation | ccoeff).
Default: 'correlation'
- normalize: Heatmap values will be in the range of 0 to 1. Default: True
- retain_size: Whether to retain the same size as input image. Default: True
:return: Heatmap
"""
# if image has more than 3 channels, use own implementation
if len(image.shape) > 3:
return match_template(template, image, options)
op = _DEF_TM_OPT.copy()
if options is not None:
op.update(options)
method = cv.TM_CCORR_NORMED
if op['normalize'] and op['distance'] == 'euclidean':
method = cv.TM_SQDIFF_NORMED
elif op['distance'] == 'euclidean':
method = cv.TM_SQDIFF
elif op['normalize'] and op['distance'] == 'ccoeff':
method = cv.TM_CCOEFF_NORMED
elif op['distance'] == 'ccoeff':
method = cv.TM_CCOEFF
elif not op['normalize'] and op['distance'] == 'correlation':
method = cv.TM_CCORR
heatmap = cv.matchTemplate(image, template, method)
# make minimum peak heatmap
if method not in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
heatmap = heatmap.max() - heatmap
if op['normalize']:
heatmap /= heatmap.max()
# size
if op['retain_size']:
hmap = np.ones(image.shape[:2]) * heatmap.max()
h, w = heatmap.shape
hmap[:h, :w] = heatmap
heatmap = hmap
return heatmap
| gpl-3.0 | 6,487,248,755,391,905,000 | 30.821622 | 93 | 0.621199 | false | 3.679375 | false | false | false |
stefanv/scipy3 | scipy/linalg/info.py | 2 | 2837 | """
Linear algebra routines
=======================
Linear Algebra Basics::
inv --- Find the inverse of a square matrix
solve --- Solve a linear system of equations
solve_banded --- Solve a linear system of equations with a banded matrix
solveh_banded --- Solve a linear system of equations with a Hermitian or symmetric banded matrix, returning the Cholesky decomposition as well
det --- Find the determinant of a square matrix
norm --- matrix and vector norm
lstsq --- Solve linear least-squares problem
pinv --- Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 --- Pseudo-inverse using svd
Eigenvalues and Decompositions::
eig --- Find the eigenvalues and vectors of a square matrix
eigvals --- Find the eigenvalues of a square matrix
eig_banded --- Find the eigenvalues and vectors of a band matrix
eigvals_banded --- Find the eigenvalues of a band matrix
lu --- LU decomposition of a matrix
lu_factor --- LU decomposition returning unordered matrix and pivots
lu_solve --- solve Ax=b using back substitution with output of lu_factor
svd --- Singular value decomposition of a matrix
svdvals --- Singular values of a matrix
diagsvd --- construct matrix of singular values from output of svd
orth --- construct orthonormal basis for range of A using svd
cholesky --- Cholesky decomposition of a matrix
cholesky_banded --- Cholesky decomposition of a banded symmetric or Hermitian matrix
cho_factor --- Cholesky decomposition for use in solving linear system
cho_solve --- Solve previously factored linear system
qr --- QR decomposition of a matrix
schur --- Schur decomposition of a matrix
rsf2csf --- Real to complex schur form
hessenberg --- Hessenberg form of a matrix
Matrix Functions::
expm --- matrix exponential using Pade approx.
expm2 --- matrix exponential using Eigenvalue decomp.
expm3 --- matrix exponential using Taylor-series expansion
logm --- matrix logarithm
cosm --- matrix cosine
sinm --- matrix sine
tanm --- matrix tangent
coshm --- matrix hyperbolic cosine
sinhm --- matrix hyperbolic sine
tanhm --- matrix hyperbolic tangent
signm --- matrix sign
sqrtm --- matrix square root
funm --- Evaluating an arbitrary matrix function.
Iterative linear systems solutions::
cg --- Conjugate gradient (symmetric systems only)
cgs --- Conjugate gradient squared
qmr --- Quasi-minimal residual
gmres --- Generalized minimal residual
bicg --- Bi-conjugate gradient
bicgstab --- Bi-conjugate gradient stabilized
"""
postpone_import = 1
depends = ['misc','lib.lapack']
| bsd-3-clause | -5,861,253,898,052,439,000 | 41.343284 | 145 | 0.666549 | false | 4.31155 | false | false | false |
antoniodemora/git-cola | cola/widgets/submodules.py | 2 | 3981 | """Provides widgets related to submodules"""
from __future__ import absolute_import
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
from .. import cmds
from .. import core
from .. import qtutils
from .. import icons
from ..i18n import N_
from ..widgets import defs
from ..widgets import standard
class SubmodulesWidget(QtWidgets.QFrame):
def __init__(self, context, parent):
QtWidgets.QFrame.__init__(self, parent)
self.setToolTip(N_('Submodules'))
self.tree = SubmodulesTreeWidget(context, parent=self)
self.setFocusProxy(self.tree)
self.main_layout = qtutils.vbox(defs.no_margin, defs.spacing, self.tree)
self.setLayout(self.main_layout)
# Titlebar buttons
self.refresh_button = qtutils.create_action_button(
tooltip=N_('Refresh'), icon=icons.sync())
self.open_parent_button = qtutils.create_action_button(
tooltip=N_('Open Parent'), icon=icons.repo())
self.button_layout = qtutils.hbox(defs.no_margin, defs.spacing,
self.open_parent_button,
self.refresh_button)
self.corner_widget = QtWidgets.QWidget(self)
self.corner_widget.setLayout(self.button_layout)
titlebar = parent.titleBarWidget()
titlebar.add_corner_widget(self.corner_widget)
# Connections
qtutils.connect_button(self.refresh_button,
context.model.update_submodules_list)
qtutils.connect_button(self.open_parent_button,
cmds.run(cmds.OpenParentRepo, context))
class SubmodulesTreeWidget(standard.TreeWidget):
updated = Signal()
def __init__(self, context, parent=None):
standard.TreeWidget.__init__(self, parent=parent)
self.context = context
self.main_model = model = context.model
self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.setHeaderHidden(True)
# UI
self._active = False
self.list_helper = BuildItem()
self.itemDoubleClicked.connect(self.tree_double_clicked)
# Connections
self.updated.connect(self.refresh, type=Qt.QueuedConnection)
model.add_observer(model.message_submodules_changed,
self.updated.emit)
def refresh(self):
if not self._active:
return
items = [self.list_helper.get(entry) for entry in
self.main_model.submodules_list]
self.clear()
self.addTopLevelItems(items)
def showEvent(self, event):
"""Defer updating widgets until the widget is visible"""
if not self._active:
self._active = True
self.refresh()
return super(SubmodulesTreeWidget, self).showEvent(event)
def tree_double_clicked(self, item, _column):
path = core.abspath(item.path)
cmds.do(cmds.OpenRepo, self.context, path)
class BuildItem(object):
def __init__(self):
self.state_folder_map = {}
self.state_folder_map[''] = icons.folder()
self.state_folder_map['+'] = icons.staged()
self.state_folder_map['-'] = icons.modified()
self.state_folder_map['U'] = icons.merge()
def get(self, entry):
"""entry: same as returned from list_submodule"""
name = entry[2]
path = entry[2]
# TODO better tip
tip = path + '\n' + entry[1]
if entry[3]:
tip += '\n({0})'.format(entry[3])
icon = self.state_folder_map[entry[0]]
return SubmodulesTreeWidgetItem(name, path, tip, icon)
class SubmodulesTreeWidgetItem(QtWidgets.QTreeWidgetItem):
def __init__(self, name, path, tip, icon):
QtWidgets.QTreeWidgetItem.__init__(self)
self.path = path
self.setIcon(0, icon)
self.setText(0, name)
self.setToolTip(0, tip)
| gpl-2.0 | -887,538,373,377,109,100 | 31.900826 | 80 | 0.613916 | false | 3.953327 | false | false | false |
geekboxzone/lollipop_external_chromium_org | build/android/gyp/util/md5_check.py | 99 | 2375 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import os
def CallAndRecordIfStale(
function, record_path=None, input_paths=None, input_strings=None,
force=False):
"""Calls function if the md5sum of the input paths/strings has changed.
The md5sum of the inputs is compared with the one stored in record_path. If
this has changed (or the record doesn't exist), function will be called and
the new md5sum will be recorded.
If force is True, the function will be called regardless of whether the
md5sum is out of date.
"""
if not input_paths:
input_paths = []
if not input_strings:
input_strings = []
md5_checker = _Md5Checker(
record_path=record_path,
input_paths=input_paths,
input_strings=input_strings)
if force or md5_checker.IsStale():
function()
md5_checker.Write()
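# Hedged usage sketch (not part of the original module): rebuild an output only
# when its inputs change. The paths, flag and build callback are illustrative.
def _ExampleRebuildIfStale():
  def _Build():
    pass  # the real build step would go here
  CallAndRecordIfStale(
      _Build,
      record_path='out/example.stamp',
      input_paths=['input_a.txt', 'input_dir/'],
      input_strings=['--example-flag'])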
def _UpdateMd5ForFile(md5, path, block_size=2**16):
with open(path, 'rb') as infile:
while True:
data = infile.read(block_size)
if not data:
break
md5.update(data)
def _UpdateMd5ForDirectory(md5, dir_path):
for root, _, files in os.walk(dir_path):
for f in files:
_UpdateMd5ForFile(md5, os.path.join(root, f))
def _UpdateMd5ForPath(md5, path):
if os.path.isdir(path):
_UpdateMd5ForDirectory(md5, path)
else:
_UpdateMd5ForFile(md5, path)
class _Md5Checker(object):
def __init__(self, record_path=None, input_paths=None, input_strings=None):
if not input_paths:
input_paths = []
if not input_strings:
input_strings = []
assert record_path.endswith('.stamp'), (
'record paths must end in \'.stamp\' so that they are easy to find '
'and delete')
self.record_path = record_path
md5 = hashlib.md5()
for i in sorted(input_paths):
_UpdateMd5ForPath(md5, i)
for s in input_strings:
md5.update(s)
self.new_digest = md5.hexdigest()
self.old_digest = ''
if os.path.exists(self.record_path):
with open(self.record_path, 'r') as old_record:
self.old_digest = old_record.read()
def IsStale(self):
return self.old_digest != self.new_digest
def Write(self):
with open(self.record_path, 'w') as new_record:
new_record.write(self.new_digest)
| bsd-3-clause | 3,922,210,798,291,311,600 | 26.616279 | 77 | 0.667368 | false | 3.280387 | false | false | false |
jakesyl/BitTornado | BitTornado/clock.py | 2 | 1086 | """Provide a non-decreasing clock() function.
In Windows, time.clock() provides number of seconds from first call, so use
that.
In Unix, time.clock() is CPU time, and time.time() reports system time, which
may not be non-decreasing."""
import time
import sys
_MAXFORWARD = 100
_FUDGE = 1
class RelativeTime(object): # pylint: disable=R0903
"""Non-decreasing time implementation for Unix"""
def __init__(self):
self.time = time.time()
self.offset = 0
def get_time(self):
"""Calculate a non-decreasing time representation"""
systemtime = time.time()
now = systemtime + self.offset
if self.time < now < self.time + _MAXFORWARD:
self.time = now
else:
# If time jump is outside acceptable bounds, move ahead one second
# and note the offset
self.time += _FUDGE
self.offset = self.time - systemtime
return self.time
if sys.platform != 'win32':
clock = RelativeTime().get_time # pylint: disable=C0103
else:
from time import clock
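# Illustrative usage (not part of the original module): clock() returns seconds
# as a non-decreasing value, so elapsed time can be measured safely:
#   from BitTornado.clock import clock
#   start = clock()
#   ...  # do some work
#   elapsed = clock() - start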
| mit | 6,522,149,571,640,859,000 | 25.487805 | 78 | 0.62523 | false | 3.892473 | false | false | false |
dgarrett622/FuncComp | FuncComp/util.py | 1 | 2464 | # -*- coding: utf-8 -*-
"""
v1: Created on May 31, 2016
author: Daniel Garrett ([email protected])
"""
import numpy as np
def maxdmag(s, ranges, x):
"""Calculates the maximum difference in magnitude for a given population
and apparent separation value
Args:
s (ndarray):
Apparent separation (AU)
ranges (tuple):
pmin (float): minimum geometric albedo
Rmin (float): minimum planetary radius (km)
rmax (float): maximum distance from star (AU)
x (float):
Conversion factor for AU to km
Returns:
maxdmag (ndarray):
Maximum difference in magnitude for given population and separation
"""
pmin, Rmin, rmax = ranges
PhiL = lambda b: (1./np.pi)*(np.sin(b) + (np.pi - b)*np.cos(b))
maxdmag = -2.5*np.log10(pmin*(Rmin*x/rmax)**2*PhiL(np.pi - np.arcsin(s/rmax)))
return maxdmag
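# Hedged usage sketch (not part of the original module): evaluating maxdmag for a
# single separation. The population tuple and the unit-conversion factor below are
# illustrative placeholders only (see the docstring for the expected units).
def _example_maxdmag():
    s = np.array([1.0])            # apparent separation (AU)
    ranges = (0.2, 6000.0, 5.0)    # (pmin, Rmin, rmax)
    x = 6.68e-9                    # placeholder km<->AU conversion factor
    return maxdmag(s, ranges, x)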
def mindmag(s, ranges, x):
"""Calculates the minimum difference in magnitude for a given population
and apparent separation value
Args:
s (ndarray):
Apparent separation (AU)
ranges (tuple):
pmax (float): maximum geometric albedo
Rmax (float): maximum planetary radius (km)
rmin (float): minimum distance from star (AU)
rmax (float): maximum distance from star (AU)
x (float):
Conversion factor for AU to km
Returns:
mindmag (ndarray):
Minimum difference in magnitude for given population and separation
"""
pmax, Rmax, rmin, rmax = ranges
bstar = 1.104728818644543
PhiL = lambda b: (1./np.pi)*(np.sin(b) + (np.pi - b)*np.cos(b))
if type(s) == np.ndarray:
mindmag = -2.5*np.log10(pmax*(Rmax*x*np.sin(bstar)/s)**2*PhiL(bstar))
mindmag[s < rmin*np.sin(bstar)] = -2.5*np.log10(pmax*(Rmax*x/rmin)**2*PhiL(np.arcsin(s[s < rmin*np.sin(bstar)]/rmin)))
mindmag[s > rmax*np.sin(bstar)] = -2.5*np.log10(pmax*(Rmax*x/rmax)**2*PhiL(np.arcsin(s[s > rmax*np.sin(bstar)]/rmax)))
else:
if s < rmin*np.sin(bstar):
mindmag = -2.5*np.log10(pmax*(Rmax*x/rmin)**2*PhiL(np.arcsin(s/rmin)))
elif s > rmax*np.sin(bstar):
mindmag = -2.5*np.log10(pmax*(Rmax*x/rmax)**2*PhiL(np.arcsin(s/rmax)))
else:
mindmag = -2.5*np.log10(pmax*(Rmax*x*np.sin(bstar)/s)**2*PhiL(bstar))
return mindmag | mit | -8,903,001,090,910,545,000 | 34.214286 | 126 | 0.579951 | false | 3.008547 | false | false | false |
algolia/algoliasearch-client-python | algoliasearch/http/serializer.py | 1 | 1899 | import json
import calendar
import datetime
import decimal
import sys
from typing import Union, Any, Dict
from algoliasearch.helpers import get_items
# Python 3
if sys.version_info >= (3, 0):
from urllib.parse import urlencode
else:
from urllib import urlencode # pragma: no cover
class QueryParametersSerializer(object):
@staticmethod
def serialize(query_parameters):
# type: (Dict[str, Any]) -> str
for key, value in get_items(query_parameters):
if isinstance(value, (list, dict)):
value = json.dumps(value)
elif isinstance(value, bool):
value = "true" if value else "false"
query_parameters[key] = value
return urlencode(sorted(get_items(query_parameters), key=lambda val: val[0]))
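# Hedged usage sketch (not part of the original module): lists and dicts are JSON
# encoded, booleans become "true"/"false", and keys are sorted before URL encoding.
# The parameter names below are illustrative.
def _example_serialize():
    params = {'page': 0, 'getRankingInfo': True, 'attributesToRetrieve': ['name']}
    return QueryParametersSerializer.serialize(params)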
class SettingsDeserializer(object):
@staticmethod
def deserialize(data):
# type: (Dict[str, Any]) -> dict
keys = {
"attributesToIndex": "searchableAttributes",
"numericAttributesToIndex": "numericAttributesForFiltering",
"slaves": "replicas",
}
for deprecated_key, current_key in get_items(keys):
if deprecated_key in data:
data[current_key] = data.pop(deprecated_key)
return data
class DataSerializer(object):
@staticmethod
def serialize(data):
# type: (Union[Dict[str, Any], list]) -> str
return json.dumps(data, cls=JSONEncoder)
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
# type: (object) -> object
if isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, datetime.datetime):
return int(calendar.timegm(obj.utctimetuple()))
elif type(obj).__str__ is not object.__str__:
return str(obj)
return json.JSONEncoder.default(self, obj)
| mit | -893,018,040,258,042,100 | 25.746479 | 85 | 0.622959 | false | 4.192053 | false | false | false |
rjw57/foldbeam | foldbeam/web/restapi/user.py | 1 | 1057 | import json
from flask import url_for, make_response
from .flaskapp import app, resource
from .util import *
@app.route('/<username>', methods=['GET', 'PUT'])
def user(username):
if request.method == 'GET':
return get_user(username)
elif request.method == 'PUT':
return put_user(username)
# should never be reached
abort(500) # pragma: no coverage
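# Illustrative behaviour summary (not part of the original module):
#   GET /<username> returns JSON describing the user's maps, layers and buckets.
#   PUT /<username> creates or replaces the user and returns its URL with a 201.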
@resource
def get_user(username):
user = get_user_or_404(username)
return {
'username': user.username,
'resources': {
'maps': { 'url': url_for_user_maps(user) },
'layers': { 'url': url_for_user_layers(user) },
'buckets': { 'url': url_for_user_buckets(user) },
},
}
def put_user(username):
# This will replace the one currently in the DB
user = model.User(username)
user.save()
response = make_response(json.dumps({ 'url': url_for_user(user) }), 201)
response.headers['Location'] = url_for_user(user)
response.headers['Content-Type'] = 'application/json'
return response
| apache-2.0 | 4,488,432,942,735,312,000 | 26.815789 | 76 | 0.614002 | false | 3.535117 | false | false | false |
selective-inference/selective-inference | doc/adjusted_MLE/tests/test_compare_sampler_mle.py | 3 | 5428 | import numpy as np, os, itertools
import pandas as pd
from .comparison_metrics import (sim_xy,
selInf_R,
glmnet_lasso,
coverage,
compare_sampler_MLE)
def compare_sampler_mle(n=500,
p=100,
rho=0.35,
s=5,
beta_type=1,
snr_values=np.array([0.10, 0.15, 0.20, 0.25, 0.30,
0.35, 0.42, 0.71, 1.22, 2.07]),
target="selected",
tuning_rand="lambda.1se",
randomizing_scale= np.sqrt(0.50),
ndraw=50,
outpath=None):
df_selective_inference = pd.DataFrame()
if n > p:
full_dispersion = True
else:
full_dispersion = False
snr_list = []
for snr in snr_values:
snr_list.append(snr*np.ones(2))
output_overall = np.zeros(23)
for i in range(ndraw):
output_overall += np.squeeze(
compare_sampler_MLE(n=n,
p=p,
nval=n,
rho=rho,
s=s,
beta_type=beta_type,
snr=snr,
target = target,
randomizer_scale=randomizing_scale,
full_dispersion=full_dispersion,
tuning_rand=tuning_rand))
nreport = output_overall[22]
randomized_MLE_inf = np.hstack(((output_overall[0:7] /
float(ndraw - nreport)).reshape((1, 7)),
(output_overall[7:11] /
float(ndraw)).reshape((1, 4))))
randomized_sampler_inf = np.hstack(((output_overall[11:18] /
float(ndraw - nreport)).reshape((1, 7)),
(output_overall[18:22] /
float(ndraw)).reshape((1, 4))))
df_MLE = pd.DataFrame(data=randomized_MLE_inf, columns=['coverage',
'length',
'prop-infty',
'tot-active',
'bias',
'sel-power',
'time',
'power',
'power-BH',
'fdr-BH',
'tot-discoveries'])
df_MLE['method'] = "MLE"
df_sampler = pd.DataFrame(data=randomized_sampler_inf, columns=['coverage',
'length',
'prop-infty',
'tot-active',
'bias',
'sel-power',
'time',
'power',
'power-BH',
'fdr-BH',
'tot-discoveries'])
df_sampler['method'] = "Sampler"
df_selective_inference = df_selective_inference.append(df_MLE, ignore_index=True)
df_selective_inference = df_selective_inference.append(df_sampler, ignore_index=True)
snr_list = list(itertools.chain.from_iterable(snr_list))
df_selective_inference['n'] = n
df_selective_inference['p'] = p
df_selective_inference['s'] = s
df_selective_inference['rho'] = rho
df_selective_inference['beta-type'] = beta_type
df_selective_inference['snr'] = pd.Series(np.asarray(snr_list))
df_selective_inference['target'] = target
if outpath is None:
outpath = os.path.dirname(__file__)
outfile_inf_csv = (os.path.join(outpath, "compare_" + str(n) +
"_" + str(p) + "_inference_betatype" +
str(beta_type) + target + "_rho_" + str(rho) + ".csv"))
outfile_inf_html = os.path.join(outpath, "compare_" + str(n) +
"_" + str(p) + "_inference_betatype" +
str(beta_type) + target + "_rho_" + str(rho) + ".html")
df_selective_inference.to_csv(outfile_inf_csv, index=False)
df_selective_inference.to_html(outfile_inf_html)
| bsd-3-clause | 4,470,810,480,543,356,400 | 49.259259 | 93 | 0.336957 | false | 5.021277 | false | false | false |
ENSTA-Bretagne-Shepherd/Shepherd-Ros-Structure | src/shepherd_loc/src/sailboat_interval_estimator.py | 1 | 2548 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alaa El Jawad
~~~~~~~~~~~~~
This node subscribes to the mesured position of the sailboat and
publishes an interval where the sailboat should be
"""
import rospy
from shepherd_msg.msg import RosInterval, SailboatPoseInterval, SailboatPose
# --------------------------------------------------------------------------------
# ROS Node initialisation
# --------------------------------------------------------------------------------
rospy.init_node('sailboatX_locator')
# --------------------------------------------------------------------------------
# Get sensors precision from rosparam (if any)
# --------------------------------------------------------------------------------
# GPS Precision
gps_noise = 2
if rospy.has_param('sailboat_gps_noise'):
gps_noise = rospy.get_param('sailboat_gps_noise')
rospy.loginfo('I Precision was set to %f', gps_noise)
else:
msg = 'GPS Precision was not set in param server, defaulting to: {} m'
msg = msg.format(gps_noise)
rospy.loginfo(msg)
# IMU Precision
imu_noise = 0.2
if rospy.has_param('sailboat_imu_noise'):
imu_noise = rospy.get_param('sailboat_imu_noise')
rospy.loginfo('IMU Precision was set to %f', imu_noise)
else:
msg = 'IMU Precision was not set in param server, defaulting to: {} deg'
msg = msg.format(imu_noise)
rospy.loginfo(msg)
# --------------------------------------------------------------------------------
# Publisher of the interval of pose
# --------------------------------------------------------------------------------
pose_pub = rospy.Publisher('pose_interval',
SailboatPoseInterval, queue_size=1)
est_pub = rospy.Publisher('pose_est',
SailboatPose, queue_size=1)
# --------------------------------------------------------------------------------
# Subscribe to SailboatPose mesured data
# --------------------------------------------------------------------------------
def publish_pose_interval(msg):
global gps_noise, imu_noise
poseI = SailboatPoseInterval()
poseI.x = RosInterval(msg.pose.x - gps_noise, msg.pose.x + gps_noise)
poseI.y = RosInterval(msg.pose.y - gps_noise, msg.pose.y + gps_noise)
poseI.theta = RosInterval(
msg.pose.theta - imu_noise, msg.pose.theta + imu_noise)
pose_pub.publish(poseI)
# for the moment pose_est=pose_noisy
est_pub.publish(msg)
sb_pose_sub = rospy.Subscriber(
'pose_noisy', SailboatPose, publish_pose_interval)
rospy.spin()
| mit | -131,172,703,420,569,500 | 35.4 | 82 | 0.501177 | false | 3.692754 | false | false | false |
codito/pomito | tests/test_pomodoro.py | 1 | 12811 | # -*- coding: utf-8 -*-
"""Tests for pomodoro service."""
import os
import sys
import time
import unittest
import pytest
from unittest.mock import Mock
from pomito import main, pomodoro, task
from pomito.plugins.ui import UIPlugin
from pomito.plugins.task import TaskPlugin
from pomito.test import PomitoTestFactory
class PomodoroServiceTests(unittest.TestCase):
"""Tests for pomodoro service.
- test_break_stopped_without_start
- test_session_stopped_without_start
- test_interruption_stopped_without_start
- test_get_config_gets_value_for_plugin_and_key
- test_get_config_throws_for_invalid_plugin
- test_get_config_throws_for_invalid_key
- test_get_config_throws_for_invalid_inifile
"""
def setUp(self):
test_factory = PomitoTestFactory()
self.pomodoro_service = test_factory.create_fake_service()
self.dummy_task = Mock(spec=task.Task)
self.dummy_callback = Mock()
def tearDown(self):
self.pomodoro_service._pomito_instance.exit()
def test_current_task_none_for_default_pomodoro(self):
assert self.pomodoro_service.current_task is None
def test_current_task_is_set_for_running_session(self):
self.pomodoro_service.start_session(self.dummy_task)
assert self.pomodoro_service.current_task == self.dummy_task
self.pomodoro_service.stop_session()
def test_current_task_none_after_session_stop(self):
self.pomodoro_service.start_session(self.dummy_task)
self.pomodoro_service.stop_session()
assert self.pomodoro_service.current_task is None
def test_get_config_gets_value_for_plugin_and_key(self):
pass
def test_get_config_returns_none_invalid_plugin(self):
val = self.pomodoro_service.get_config("dummy_plugin", "dummy_key")
assert val is None
def test_get_task_plugins_gets_list_of_all_task_plugins(self):
from pomito import plugins
plugins.PLUGINS = {'a': plugins.task.nulltask.NullTask(None),
'b': self.pomodoro_service}
task_plugins = self.pomodoro_service.get_task_plugins()
assert task_plugins == [plugins.PLUGINS['a']]
def test_get_tasks_returns_tasks_for_the_user(self):
self.pomodoro_service.get_tasks()
self.pomodoro_service \
._pomito_instance \
.task_plugin.get_tasks.assert_called_once_with()
def test_get_tasks_by_filter_returns_tasks_match_filter(self):
self.pomodoro_service.get_tasks_by_filter("dummy_filter")
self.pomodoro_service \
._pomito_instance \
.task_plugin.get_tasks_by_filter \
.assert_called_once_with("dummy_filter")
def test_get_task_by_id_returns_task_matching_task_idish(self):
self.pomodoro_service.get_task_by_id(10)
self.pomodoro_service \
._pomito_instance \
.task_plugin.get_task_by_id \
.assert_called_once_with(10)
def test_start_session_throws_if_no_task_is_provided(self):
self.assertRaises(Exception, self.pomodoro_service.start_session, None)
def test_stop_session_waits_for_timer_thread_to_join(self):
self.pomodoro_service.start_session(self.dummy_task)
assert self.pomodoro_service._timer.is_alive()
self.pomodoro_service.stop_session()
assert self.pomodoro_service._timer.is_alive() is False
def test_stop_break_waits_for_timer_thread_to_join(self):
self.pomodoro_service.start_break()
assert self.pomodoro_service._timer.is_alive()
self.pomodoro_service.stop_break()
assert self.pomodoro_service._timer.is_alive() is False
def test_session_started_is_called_with_correct_session_count(self):
self.pomodoro_service.signal_session_started \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_session(self.dummy_task)
self.dummy_callback.assert_called_once_with(None,
session_count=0,
session_duration=600,
task=self.dummy_task)
self.pomodoro_service.signal_session_started \
.disconnect(self.dummy_callback)
self.pomodoro_service.stop_session()
def test_session_stopped_for_reason_interrupt(self):
self.pomodoro_service.signal_session_stopped \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_session(self.dummy_task)
self.pomodoro_service.stop_session()
self.dummy_callback.\
assert_called_once_with(None, session_count=0,
task=self.dummy_task,
reason=pomodoro.TimerChange.INTERRUPT)
self.pomodoro_service.signal_session_stopped \
.disconnect(self.dummy_callback)
def test_session_stopped_for_reason_complete(self):
self.pomodoro_service.signal_session_stopped \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_session(self.dummy_task)
self.pomodoro_service._timer.trigger_callback(pomodoro.TimerChange.COMPLETE)
self.dummy_callback.assert_called_once_with(None, session_count=1,
task=self.dummy_task,
reason=pomodoro.TimerChange.COMPLETE)
self.pomodoro_service.signal_session_stopped\
.disconnect(self.dummy_callback)
def test_break_started_shortbreak(self):
self._test_break_started(pomodoro.TimerType.SHORT_BREAK, 120)
def test_break_started_longbreak(self):
self.pomodoro_service._session_count = 4
self._test_break_started(pomodoro.TimerType.LONG_BREAK, 300)
def _test_break_started(self, break_type, duration):
self.pomodoro_service.signal_break_started \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.dummy_callback\
.assert_called_once_with(None,
break_type=break_type,
break_duration=duration)
self.pomodoro_service.stop_break()
self.pomodoro_service.signal_break_started \
.disconnect(self.dummy_callback)
def test_break_stopped_shortbreak_for_reason_complete(self):
self.pomodoro_service.signal_break_stopped\
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.pomodoro_service._timer.trigger_callback(pomodoro.TimerChange.COMPLETE)
self.dummy_callback.assert_called_once_with(None,
break_type=pomodoro.TimerType.SHORT_BREAK,
reason=pomodoro.TimerChange.COMPLETE)
self.pomodoro_service.signal_break_stopped\
.disconnect(self.dummy_callback)
def test_break_stopped_shortbreak_for_reason_interrupt(self):
self.pomodoro_service.signal_break_stopped\
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.pomodoro_service.stop_break()
self.dummy_callback.assert_called_once_with(None,
break_type=pomodoro.TimerType.SHORT_BREAK,
reason=pomodoro.TimerChange.INTERRUPT)
self.pomodoro_service.signal_break_stopped\
.disconnect(self.dummy_callback)
def test_break_stopped_longbreak_for_interrupt(self):
self.pomodoro_service._session_count = 4
self.pomodoro_service.signal_break_stopped\
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.pomodoro_service.stop_break()
self.dummy_callback.assert_called_once_with(None,
break_type=pomodoro.TimerType.LONG_BREAK,
reason=pomodoro.TimerChange.INTERRUPT)
self.pomodoro_service.signal_break_stopped\
.disconnect(self.dummy_callback)
def test_get_data_dir_returns_correct_default(self):
expected_data_dir = os.path.join(os.path.expanduser("~"), "pomito")
if sys.platform.startswith("linux"):
home_dir = os.getenv("HOME")
alt_data_dir = os.path.join(home_dir, ".local/share")
expected_data_dir = os.path\
.join(os.getenv("XDG_DATA_HOME") or alt_data_dir, "pomito")
data_dir = self.pomodoro_service.get_data_dir()
assert data_dir == expected_data_dir
def test_get_db_returns_a_valid_database(self):
test_db = "dummy_db"
pomodoro_service = pomodoro.Pomodoro(main.Pomito(database=test_db))
assert pomodoro_service.get_db() == test_db
@pytest.mark.perf
def test_session_started_perf(self):
t = Mock(spec=task.Task)
pomito = main.Pomito(None)
pomito.ui_plugin = DummyUIPlugin()
pomito.task_plugin = Mock(spec=TaskPlugin)
pomito._message_dispatcher.start()
pomito.pomodoro_service.signal_session_started \
.connect(pomito.ui_plugin.notify_session_started, weak=False)
time_start = time.time() # initial timestamp
pomito.pomodoro_service.start_session(t)
time.sleep(1)
time_end = pomito.ui_plugin.timestamp
self.assertAlmostEqual(time_start, time_end, delta=0.1)
pomito.ui_plugin.timestamp = None
pomito.pomodoro_service.stop_session()
pomito.exit()
class TimerTests(unittest.TestCase):
def setUp(self):
self.timestamp_start = 0.0
self.timestamp_end = 0.0
self.delta = 0.0
self.mock_callback = Mock()
def tearDown(self):
self.timestamp_start = self.timestamp_end = self.delta = 0.0
def dummy_callback(self, reason='whatever'):
self.timestamp_end = time.time()
self.delta += (self.timestamp_end - self.timestamp_start)
self.timestamp_start = self.timestamp_end
self.reason = reason
def test_mock_callback_reason_increment_and_complete(self):
timer = pomodoro.Timer(0.2, self.mock_callback, 0.1)
timer.start()
time.sleep(0.3)
assert self.mock_callback.call_count == 2
self.assertListEqual(self.mock_callback.call_args_list,
[((pomodoro.TimerChange.INCREMENT,), {}), ((pomodoro.TimerChange.COMPLETE,), {})],
'invalid notify_reason')
def test_mock_callback_reason_interrupt(self):
timer = pomodoro.Timer(10, self.mock_callback, 1)
timer.start()
timer.stop()
time.sleep(0.1)
assert self.mock_callback.call_count == 1
self.assertListEqual(self.mock_callback.call_args_list,
[((pomodoro.TimerChange.INTERRUPT,), {})],
'invalid notify_reason')
def test_start_throws_when_called_on_same_thread(self):
def callback_with_catch(reason):
try:
timer.start()
assert False # expect previous call to throw
except RuntimeError:
pass
timer = pomodoro.Timer(10, callback_with_catch, 1)
timer.start()
timer.stop()
time.sleep(0.1)
def test_stop_throws_when_called_on_same_thread(self):
def callback_with_catch(reason):
try:
timer.stop()
assert False # expect previous call to throw
except RuntimeError:
pass
timer = pomodoro.Timer(10, callback_with_catch, 1)
timer.start()
timer.stop()
time.sleep(0.1)
@pytest.mark.perf
def test_callback_granular(self):
duration = 60.00
delta_granular = 1.0 # windows
if sys.platform.startswith("linux"):
delta_granular = 0.03
timer = pomodoro.Timer(duration, self.dummy_callback)
self.timestamp_start = time.time()
timer.start()
time.sleep(duration + 2)
assert self.reason == pomodoro.TimerChange.COMPLETE
self.assertAlmostEqual(self.delta, duration, delta=delta_granular)
class DummyUIPlugin(UIPlugin):
def __init__(self):
"""Create an instance of dummy plugin."""
self.timestamp = 100.0
return
def run(self):
pass
def notify_session_started(self, sender, **kwargs):
self.timestamp = time.time()
return
def initialize(self):
pass
| mit | -7,101,168,220,726,572,000 | 35.189266 | 111 | 0.615331 | false | 3.621996 | true | false | false |
zhangzewen/Network_Lib | ML/Apriori_TC.py | 1 | 5653 | #FileName:Apriori.py
import sys
import copy
def InsertToSortedList(tlist,item):
i = 0
while i < len(tlist):
if item < tlist[i]:
break
elif item == tlist[i]:
return
i+=1
tlist.insert(i, item)
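# Worked example (hypothetical data): keeps the list sorted ascending and skips duplicates.
#   tlist = [2, 5]; InsertToSortedList(tlist, 4)  ->  tlist == [2, 4, 5]
#   InsertToSortedList(tlist, 5)                  ->  tlist unchanged (5 already present)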
def CompItemSet(x,y):
i = 0
while i < len(x) and i < len(y):
if x[i] < y[i]:
return -1
elif x[i] > y[i]:
return 1
i += 1
if i == len(x) and i == len(y):
return 0
elif i < len(y):
return -1
return 1
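# Worked example (hypothetical data): lexicographic comparison, a shorter prefix sorts first.
#   CompItemSet([1, 2], [1, 2, 3]) -> -1      CompItemSet([1, 3], [1, 2, 3]) -> 1
# Used below as the cmp argument when sorting the transaction database.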
RawFile = "casts.list.txt"
ResultFile = "casts.fis.txt"
infile = file(RawFile,'r')
s = infile.readline().lower()
WordIDTable = {}
WordList = []
WordFreqSet = {}
TSet = [] #Transaction Database
ItemSet = [] #A transaction
# load transactions
while len(s) > 0:
items = s.strip().split('\t')
for i in range(1, len(items)):
tmpstr = items[i].strip()
if tmpstr not in WordIDTable:
WordList.append(tmpstr)
WordIDTable[tmpstr] = len(WordList)
WordFreqSet[WordIDTable[tmpstr]] = 1
else:
WordFreqSet[WordIDTable[tmpstr]] += 1
InsertToSortedList(ItemSet,WordIDTable[tmpstr])
TSet.append(ItemSet)
ItemSet = []
s = infile.readline().lower()
infile.close()
print len(WordList), "person names loaded!"
print len(TSet), "transactions loaded!"
#ItemSetComp = lambda x,y:CompItemSet(x,y)
TSet.sort(CompItemSet)
MinSupCount = 5 # set the minimum support
LSet = [] # frequent item set
CSet = [] # candidate item set
CSet.append([])
# get 1-frequent item set
LSet.append([])
for (item,freq) in WordFreqSet.items():
if freq >= MinSupCount:
LSet[0].append([item])
LSet[0].sort(CompItemSet)
print len(LSet[0]), "1-frequent item sets found!"
# remove transactions containing no 1-frequent item set
# and get 2-frequent item set
Freq2Dic = {}
for itemset in TSet[:]:  # iterate over a copy so removing empty transactions below is safe
    i = 0
    while i < len(itemset):
        if WordFreqSet[itemset[i]] < MinSupCount:
            itemset.remove(itemset[i])
        else:
            i += 1  # advance only when nothing was removed, otherwise the next item is skipped
    if len(itemset) < 1:
        TSet.remove(itemset)
elif len(itemset) > 1:
# generate the dictionary of 2-item pairs, calculate the frequency
for j in range(len(itemset)-1):
for k in range(j+1,len(itemset)):
temps = str(itemset[j])+'-'+str(itemset[k])
if temps not in Freq2Dic:
Freq2Dic[temps] = 1
else:
Freq2Dic[temps] += 1
# Get 2-frequent item set
CSet.append([])
LSet.append([])
for (item,freq) in Freq2Dic.items():
if freq >= MinSupCount:
templist = []
parts = item.split('-')
templist.append(int(parts[0]))
templist.append(int(parts[1]))
LSet[1].append(templist)
LSet[1].sort(CompItemSet)
print len(TSet), "transactions after pruning!"
def IsEqual(list1, list2):
i = 0
while i < len(list1) and i < len(list2):
if list1[i] != list2[i]:
return False
i += 1
if i == len(list1) and i == len(list2):
return True
else:
return False
###################################
# for pruning
# 1: You need to decide whether 'newSet' is included in the candidate item sets for (k+1)
# 'tmpDic' is the dictionary built from k-frequent item sets
def IsValid(newSet, tmpDic):
# TODO:
for i in range(len(newSet)-2):
s = ""
for j in range(len(newSet)):
if j != i:
s += "-" + str(newSet[j])
if s[1:] not in tmpDic:
return False
return True
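# Worked example (hypothetical data): for newSet = [1, 2, 3] only the subset "2-3"
# (drop index 0) is looked up in tmpDic; the subsets [1, 2] and [1, 3] are the two
# parents joined in GenCand, so they are already known to be frequent and are skipped.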
# link and prune
def GenCand(k, LSet, CSet):
# generate the dictionary built from k-frequent item sets
PreSetDic = {}
for itemset in LSet[k-1]:
s = ""
for j in range(len(itemset)):
s += "-" + str(itemset[j])
temps = s[1:]
if temps not in PreSetDic:
PreSetDic[temps] = True
else:
print "Duplicate frequent itemset found!"
###################################
# 2: You need to generate the candidate item sets for (k+1)
# You MAY call the function 'IsValid(,)' you have built, and use the dictionary 'PreSetDic' generated above
# TODO:
for i in range(len(LSet[k-1])-1):
itemSet1 = LSet[k-1][i]
for j in range(i+1,len(LSet[k-1])):
n = 0
itemSet2 = LSet[k-1][j]
while n < len(itemSet1) and n < len(itemSet2):
if itemSet1[n] != itemSet2[n]:
break
n += 1
if len(itemSet1) - n == 1 and len(itemSet2) - n == 1:
newItemSet = copy.copy(itemSet1)
newItemSet.append(itemSet2[n])
if IsValid(newItemSet, PreSetDic):
CSet[k].append(newItemSet)
else:
break
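# Worked example (hypothetical data): with frequent 2-itemsets [1, 2] and [1, 3]
# (same first item), GenCand joins them into the candidate [1, 2, 3]; [1, 2] and
# [2, 3] differ already at index 0, so the sorted inner loop breaks without joining.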
def GetFreq(candlist,tarlist):
ci = 0
ti = 0
while ci < len(candlist) and ti < len(tarlist):
if candlist[ci] < tarlist[ti]:
break
elif candlist[ci] == tarlist[ti]:
ci += 1
ti += 1
else:
ti += 1
if len(candlist) == ci:
return 1
else:
return 0
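# Worked example (hypothetical data): both lists are sorted, so one merge-style pass
# decides whether the candidate is contained in the transaction.
#   GetFreq([2, 5], [1, 2, 3, 5]) -> 1      GetFreq([2, 6], [1, 2, 3, 5]) -> 0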
# print the solution info
k = 2
while len(LSet[k-1]) > 1:
print len(LSet[k-1]), str(k)+"-frequent item sets found!"
CSet.append([])
GenCand(k,LSet,CSet) # You are supposed to complete this function
print len(CSet[k]), str(k+1)+"-candidate item sets found!"
LSet.append([])
for candlist in CSet[k]:
count = 0
for tarlist in TSet:
count += GetFreq(candlist,tarlist)
if count >= MinSupCount:
LSet[k].append(candlist)
k += 1
# write the result
outfile = file(ResultFile, 'w')
i = 1
num = 0
for fislist in LSet:
if len(fislist) < 1:
LSet.remove(fislist)
continue
num += len(fislist)
outfile.write(str(i)+"-frequent item sets:\r\n")
for fis in fislist:
for itemid in fis:
outfile.write(WordList[itemid-1])
outfile.write('\t')
outfile.write('\r\n')
i += 1
outfile.close()
print num, "frequent item sets in total!"
| gpl-2.0 | 4,206,485,629,394,449,000 | 18.70696 | 108 | 0.597382 | false | 2.694471 | false | false | false |
mazaclub/hashmal | hashmal_lib/plugins/chainparams.py | 1 | 1409 |
from PyQt4 import QtCore
from hashmal_lib.core import chainparams
from base import Plugin, BasePluginUI, Category
def make_plugin():
p = Plugin(ChainParams)
p.has_gui = False
return p
class ChainParamsObject(QtCore.QObject):
"""This class exists so that a signal can be emitted when chainparams presets change."""
paramsPresetsChanged = QtCore.pyqtSignal()
class ChainParams(BasePluginUI):
"""For augmentation purposes, we use this plugin to help with chainparams presets."""
tool_name = 'Chainparams'
description = 'Chainparams allows plugins to add chainparams presets for Hashmal to use.'
category = Category.Core
def __init__(self, *args):
super(ChainParams, self).__init__(*args)
self.chainparams_object = ChainParamsObject()
self.paramsPresetsChanged = self.chainparams_object.paramsPresetsChanged
self.augment('chainparams_presets', None, callback=self.on_chainparams_augmented)
def add_params_preset(self, preset):
try:
chainparams.add_preset(preset)
except Exception as e:
self.error(str(e))
def on_chainparams_augmented(self, data):
# Assume data is iterable.
try:
for i in data:
self.add_params_preset(i)
return
# data is not an iterable.
except Exception:
self.add_params_preset(data)
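        # Net effect: a 'chainparams_presets' augmentation may supply either a single
        # preset or an iterable of presets; each one is passed to chainparams.add_preset().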
| gpl-3.0 | 6,010,033,544,786,269,000 | 32.547619 | 93 | 0.66785 | false | 4.060519 | false | false | false |