Dataset columns:
  repo_name      string  (length 5 to 92)
  path           string  (length 4 to 232)
  copies         string  (19 distinct values)
  size           string  (length 4 to 7)
  content        string  (length 721 to 1.04M)
  license        string  (15 distinct values)
  hash           int64   (range -9,223,277,421,539,062,000 to 9,223,102,107B)
  line_mean      float64 (range 6.51 to 99.9)
  line_max       int64   (range 15 to 997)
  alpha_frac     float64 (range 0.25 to 0.97)
  autogenerated  bool    (1 class)

repo_name: stevecshanks/trello-next-actions
path: nextactions/card.py
copies: 1
size: 1191
content:
from urllib.parse import urlparse


class Card:

    AUTO_GENERATED_TEXT = 'Auto-created by TrelloNextActions'

    def __init__(self, trello, json):
        self._trello = trello
        self.id = json['id']
        self.name = json['name']
        self.board_id = json['idBoard']
        self.description = json['desc']
        self.url = json['url']

    def isAutoGenerated(self):
        return Card.AUTO_GENERATED_TEXT in self.description

    def getProjectBoard(self):
        board_id = self._getProjectBoardId()
        return self._trello.getBoardById(board_id)

    def _getProjectBoardId(self):
        url_components = urlparse(self.description)
        path_segments = url_components.path.split('/')
        if (len(path_segments) >= 3):
            return path_segments[2]
        else:
            raise ValueError("Description could not be parsed as project URL")

    def __eq__(self, other):
        return self.id == other.id

    def linksTo(self, other):
        return self.description.startswith(other.url)

    def archive(self):
        self._trello.put(
            'https://api.trello.com/1/cards/' + self.id + '/closed',
            {'value': "true"}
        )
license: mit
hash: 3,288,807,941,837,223,400
line_mean: 28.04878
line_max: 78
alpha_frac: 0.596977
autogenerated: false

repo_name: petrjasek/superdesk-core
path: superdesk/sequences.py
copies: 1
size: 2026
content:
import superdesk
import traceback
from superdesk import get_resource_service
from .resource import Resource
from .services import BaseService
import logging

logger = logging.getLogger(__name__)


def init_app(app):
    endpoint_name = "sequences"
    service = SequencesService(endpoint_name, backend=superdesk.get_backend())
    SequencesResource(endpoint_name, app=app, service=service)


class SequencesResource(Resource):
    schema = {
        "key": {"type": "string", "required": True, "nullable": False, "empty": False, "iunique": True},
        "sequence_number": {
            "type": "number",
            "default": 1,
        },
    }
    etag_ignore_fields = ["sequence_number", "name"]
    internal_resource = True
    mongo_indexes = {
        "key_1": ([("key", 1)], {"unique": True}),
    }


class SequencesService(BaseService):
    def get_next_sequence_number(self, key_name, max_seq_number=None, min_seq_number=1):
        """
        Generates Sequence Number

        :param key: key to identify the sequence
        :param max_seq_num: default None, maximal possible value, None means no upper limit
        :param min_seq_num: default 1, init value, sequence will start from the NEXT one
        :returns: sequence number
        """
        if not key_name:
            logger.error("Empty sequence key is used: {}".format("\n".join(traceback.format_stack())))
            raise KeyError("Sequence key cannot be empty")

        target_resource = get_resource_service("sequences")
        sequence_number = target_resource.find_and_modify(
            query={"key": key_name},
            update={"$inc": {"sequence_number": 1}},
            upsert=True, new=True
        ).get("sequence_number")

        if max_seq_number:
            if sequence_number > max_seq_number:
                target_resource.find_and_modify(
                    query={"key": key_name},
                    update={"$set": {"sequence_number": min_seq_number}}
                )
                sequence_number = min_seq_number

        return sequence_number
license: agpl-3.0
hash: -385,703,106,562,796,540
line_mean: 32.766667
line_max: 104
alpha_frac: 0.616486
autogenerated: false

repo_name: jiaxiaolei/pycate
path: pycate/model/refresh_model.py
copies: 1
size: 1488
content:
# -*- coding:utf-8 -*-
# Scheduled refresh (预约更新)
#
import tornpg
import libs
import peewee
from core.base_model import BaseModel


class phpmps_refresh(BaseModel):
    uid = peewee.CharField(max_length=36, null=False, unique=True,
                           help_text='类别的ID', primary_key=True)
    refresh_time = peewee.IntegerField(null=False)
    info_uid = peewee.CharField(max_length=36, null=False)
    time_str = peewee.CharField(max_length=36)


class MRefresh(BaseModel):
    def __init__(self):
        try:
            phpmps_refresh.create_table()
            pass
        except:
            pass

    def getall(self):
        return (phpmps_refresh.select().order_by('cityid'))

    def get_by_id(self, info_id):
        '''
        Select records by category ID.
        '''
        return (phpmps_refresh.select().where(phpmps_refresh.info_uid == info_id))

    def del_by_id(self, uid):
        try:
            entry = phpmps_refresh.delete().where(phpmps_refresh.uid == uid)
            entry.execute()
            return True
        except:
            return False

    def insert_data(self, par_arr):
        uid = libs.tool.get_uid()
        # libs.tool.mark_it()
        try:
            entry = phpmps_refresh.create(
                uid=uid,
                info_uid=par_arr[0],
                refresh_time=par_arr[1],
                time_str=par_arr[2],
            )
            return True
        except:
            return False
license: mit
hash: 27,952,428,316,652,670
line_mean: 24.472727
line_max: 103
alpha_frac: 0.531593
autogenerated: false

repo_name: scanner-research/scanner
path: examples/how-tos/caffe/resnet.py
copies: 1
size: 1333
content:
from scannerpy import Database, DeviceType, Job
from scannerpy.stdlib import NetDescriptor
import numpy as np
import cv2
import struct
import sys
import os

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
import util

video_path = util.download_video() if len(sys.argv) <= 1 else sys.argv[1]
print('Performing classification on video {}'.format(video_path))

video_name = os.path.splitext(os.path.basename(video_path))[0]

with Database() as db:
    [input_table], _ = db.ingest_videos(
        [(video_name, video_path)], force=True)

    descriptor = NetDescriptor.from_file(db, 'nets/resnet.toml')
    batch_size = 48

    frame = db.sources.FrameColumn()
    caffe_input = db.ops.CaffeInput(
        frame = frame,
        net_descriptor = descriptor.as_proto(),
        batch_size = batch_size,
        device=DeviceType.GPU)
    caffe_output = db.ops.Caffe(
        caffe_frame = caffe_input,
        net_descriptor = descriptor.as_proto(),
        batch_size = batch_size,
        batch = batch_size,
        device=DeviceType.GPU)
    output = db.sinks.Column(columns={'softmax': caffe_output})

    job = Job(op_args={
        frame: input_table.column('frame'),
        output: input_table.name() + '_classification'
    })
    [output] = db.run(output=output, jobs=[job],
                      pipeline_instances_per_node=1)
license: apache-2.0
hash: -8,819,657,042,652,358,000
line_mean: 31.512195
line_max: 79
alpha_frac: 0.658665
autogenerated: false

repo_name: bd-j/sedpy
path: sedpy/photometer.py
copies: 1
size: 10083
content:
# Simple Aperture photometry. kind of a stupid class dependence. # Ideally a photometer object should take an image and a region object # as arguments, where the region object is an instance of a particualr aperture class and # can return an in_region boolean (or perhaps a 'fraction') for # any position(s). As it is now the 'photometer' object (called Aperture) # is actually subclassed by the more detailed apertures/regions, # and requires passing a shape disctionary as well. redundant import sys import numpy as np from numpy import hypot, sqrt #the below are for some of the more arcane sky measurement methods try: from scipy.optimize import curve_fit import sklearn from astroML.density_estimation import bayesian_blocks import matplotlib.pyplot as pl except ImportError: pass thismod = sys.modules[__name__] class Photometer(object): """ Trying for a better class dependence. Basically wraps the image in an object with photometry methods. Incomplete """ def __init__(self, image, wcs=None, ivar=None): self.nx, self.ny = image.shape self.image = image.flatten() self.wcs = wcs self.ivar = ivar yy, xx = np.indices(self.nx, self.ny) if wcs is not None: self._x, self._y = wcs.wcs_pix2world(xx, yy, 0) else: self._x, self._y = xx, yy def measure_flux(self, aperture, background): """ Measure background subtracted flux. Takes an aperture object, and a local background object. """ o, a, e = self.object_flux(aperture) b, ba, be = background.evaluate(self) flux = o - a*b flux_var = e*e + a*a*be*be/ba return flux, sqrt(flux_var) def object_flux(self, aperture, weights=1.0): """ Measure total flux (source + background) within an aperture. Takes an image, and an aperture object. """ fracs = aperture.contains(self._x, self._y) inds = fracs > 0 if self.ivar is not None: unc = sqrt((fracs[inds]/self.ivar[inds]).sum()) else: unc = np.nan return (self.image * weights * fracs).sum(), fracs.sum(), unc class Aperture(object): def world_to_pixels(shape, wcs): pass def object_flux(self, shape, image, ivar = None): """Measure total flux within an aperture (source + background)""" inds, fracs = self.pixnum(**shape) unc = 0 if ivar is not None: unc = sqrt((fracs/ivar[inds[0], inds[1]]).sum()) return (image[inds[0], inds[1]]*fracs).sum(), fracs.sum(), unc def measure_flux(self, shape, image, wcs = None, skypars = None, ivar = None): """Measure background subtracted flux.""" o, a, e = self.object_flux(shape, image, ivar = ivar) b, ba, be = self.background.evaluate(image, skypars) flux = o - a*b flux_var = e*e + a*a*be*be/ba return flux, sqrt(flux_var) #def get_flux(self, image, ivar = None): # return self.measure_flux(self.shape, image, ivar = ivar, skypars = self.skypars, wcs = self.wcs) class Circular(Aperture): def __init__(self, exact = False): if exact is True: self.pixnum = circle_frac_exact else: self.pixnum = circle_frac_quick self.background = ZeroSky() class Elliptical(Aperture): def __init__(self): self.pixnum = ellipse_frac_quick() self.background = ZeroSky() class Box(Aperture): def __init__(self): self.pixnum = box_frac_quick() self.background = ZeroSky() # ######## Classes for sky measurement ###### class Background(object): def evaluate(self, image, skypars): inds, fracs = self.pixnum(**skypars) value, sdpp = self.skystats(image[inds[0], inds[1]], **skypars) return value, len(inds), sdpp class Annulus(Background): def __init__(self, bgtype = 'quartile_sky'): self.pixnum = circle_frac_quick self.skystats = getattr(thismod, bgtype) class EllipticalAnnulus(Background): def __init__(self, bgtype = 
'quartile_sky'): self.pixnum = ellipse_frac_quick self.skystats = getattr(thismod,bgtype) class ZeroSky(Background): """A class for sky values of zero, or for user defined sky statistics. The return_value is a tuple giving (sky, sky_area, sigma_sky_per_pixel)""" def __init__(self, bgtype = 'quartile_sky', return_value = (0,1,0)): self.pixnum = None self.skystats = None self.return_value = return_value def evaluate(self,image, skypars): return self.return_value ### Pixnum methods #### def circle_frac_quick(xcen = 0, ycen = 0, radius = 1, inner_radius = None, subpixels = 1, **extras): """obtain fractional pixel coverage. optionally use subpixels to increase precison (though this doesn't seem to help). Assumes pixel centers have coordinates X.5, Y.5 """ #setup center = np.array([xcen,ycen]) sz = np.ceil((radius+1)*2) start = np.floor(center +0.5-radius) center = center*subpixels radius = radius*subpixels sz = sz*subpixels start = (start-1)*subpixels if (start < 0).any(): raise ValueError('Aperture extends off image edge') off = center - start - 0.5 yy, xx = np.ogrid[ 0:sz, 0:sz ] rr = hypot(xx - off[0], yy-off[1]) #find pixels within the radius within = (radius+0.5) - rr within[within > 1.0] = 1.0 within[within < 0] = 0. #if it's an annulus if inner_radius is not None: within_inner = inner_radius*subpixels + 0.5 - rr within_inner[within_inner < 0.0] = 0.0 within_inner[within_inner > 1.0] = 1.0 within = within - within_inner an = within #rebin if you used subpixels if subpixels != 1: an = an.reshape((an.shape[0]/subpixels, subpixels, an.shape[1]/subpixels, subpixels)).mean(1).mean(2) #pick the pixels to rturn, and get their fractional coverage pix1 = np.where(an > 0.0) fracs = an[pix1[0],pix1[1]] x = (pix1[0] + start[0]/subpixels).astype('i8') y = (pix1[1] + start[1]/subpixels).astype('i8') return (x, y), fracs def circle_frac_exact(xcen, ycen, radius): pass def ellipse_frac_quick(xcen = 0, ycen = 0, a = 1, b = 1, pa = 0, precision = None): yy, xx = np.ogrid[ 0:sz, 0:sz ] dx, dy = (xx - off[0]), (yy-off[1]) within = 1 - np.sqrt(((dx * np.cos(pa) - dy * np.sin(pa))/a)**2 + ((dx * np.sin(pa) + dy * np.cos(pa))/b)**2) within[within > 1.0] = 1.0 within[within < 0] = 0. #rebin if you used subpixels if subpixels != 1: an = an.reshape((an.shape[0]/subpixels, subpixels, an.shape[1]/subpixels, subpixels)).mean(1).mean(2) #pick the pixels to rturn, and get their fractional coverage pix1 = np.where(an > 0.0) fracs = an[pix1[0],pix1[1]] x = (pix1[0] + start[0]/subpixels).astype('i8') y = (pix1[1] + start[1]/subpixels).astype('i8') return (x, y), fracs #####SKY statistics determination methods ##### def quartile_sky(values, percentiles = [0.16, 0.5, 0.84], **extras): """Use the median and 16th percentile to estimate the standard deviation per pixel.""" percentiles = np.asarray(percentiles) npix = len(values) #oo = np.argsort(values) qval = np.sort(values)[np.round(npix*percentiles).astype('i8')] #qval = values[oo[np.round(npix*percentiles)]] return qval[1], qval[1]-qval[0] def gaussfit_sky(values, p_thresh = 0.65, plot = False, **extras): """Fit a gaussian to the lower part of a histogram of the sky values. The histogram bins are estimated using Bayesian blocks. p_thresh gives the percentile below which the gaussian is fitted to the data. 
Return central value and estimate of standard deviation per pixel """ bins = bayesian_blocks(values) print(len(bins),bins) #dbin = bins[1:]-bins[:-1] cbin = (bins[1:]+bins[:-1])/2 hist = np.histogram(values, bins = bins, range = (bins.min(), bins.max()), density = True) #pdf = hist/dbin val_thresh = np.percentile(values, p_thresh) lower = cbin < p_thresh def gauss(x, *p): A, mu, sigma = p return A*np.exp(-(x-mu)**2/(2.*sigma**2)) # p0 is the initial guess for the fitting coefficients (A, mu and sigma above) p0 = [np.max(hist[0]), values.mean(), values.std()] coeff, var_matrix = curve_fit(gauss, cbin[lower], hist[0][lower], p0=p0) if plot: print(len(hist[1]), len(hist[0]),type(coeff)) pl.figure() pl.plot(cbin,hist[0], color = 'b') pl.plot(cbin, gauss(cbin, [coeff[0], coeff[1], coeff[2]]), color = 'r') pl.axvline(val_thresh) return coeff[1], coeff[2] def gmm_sky(values, **extras): """Use a gaussian mixture model, via expectation maximization. of course, there's only one gaussian. could add another for faint sources, bad pixels, but...""" gmm = sklearn.mixture.GMM() r = gmm.fit(values) return r.means_[0, 0], np.sqrt(r.covars_[0, 0]) def sigclip_sky(values, sigma = [3, 2.25], minlength = 5, **extras): """Use iterative sigma clipping""" def between(vals, sigs): m, s = vals.mean(), vals.std() return (vals < m+sig[1]*s) & (vals > m-sig[0]*s) while ( (False in between(values, sigma)) & (len(values) > minlength) ): values = values[between(values,sigma)] return values.mean(), values.std() ##### Centroiding ####### def centroid(images): """Dumb dumb centroiding. assumes x and y axes are the last two dimensions of images. Something is wrong with the broadcasting. absolutely *have* to include weighting""" sz = images.shape[-2:] xg = np.arange(sz[0]) yg = np.arange(sz[1]) denom = images.sum(axis = (-1, -2)) y = (yg[None,None,:]*images).sum(axis = (-2, -1)) / denom x = (xg[None,:,None]*images).sum(axis = (-2, -1)) / denom return x, y
license: gpl-2.0
hash: 1,693,906,440,199,580,000
line_mean: 33.064189
line_max: 113
alpha_frac: 0.60726
autogenerated: false

repo_name: MCME/WebStatus
path: mcmeAPI/db/models.py
copies: 1
size: 1641
content:
from sqlalchemy import Column, String, Integer, DateTime, PickleType, Boolean

from mcmeAPI.db import Base


class User(Base):
    __tablename__ = 'users'
    name = Column(String(20), primary_key=True)
    group = Column(String(15))  # rank
    updated = Column(DateTime())
    ob = Column(Boolean)
    staff = Column(Boolean)
    permissions = Column(PickleType)
    worlds = Column(PickleType)

    @property
    def serialize(self):
        return {'group': self.group,
                'name': self.name,
                'ob': self.ob,
                'staff': self.staff,
                'permissions': self.permissions,
                'worlds': self.worlds,
                'updated': dump_datetime(self.updated)
                }

    def __repr__(self):
        return '<User %r>' % (self.name)


class Server(Base):
    __tablename__ = 'servers'
    name = Column(String(15), primary_key=True)
    status = Column(String(10))
    players = Column(PickleType)
    maxplayers = Column(Integer)
    num_players = Column(Integer)
    plugins = Column(PickleType)
    updated = Column(DateTime())

    @property
    def serialize(self):
        return ({'name': self.name,
                 'status': self.status,
                 'players': self.players,
                 'num_players': self.num_players,
                 'maxplayers': self.maxplayers,
                 'plugins': self.plugins,
                 'updated': dump_datetime(self.updated)})


def dump_datetime(value):
    """Deserialize datetime object into string form for JSON processing."""
    if value is None:
        return None
    return value.strftime("%Y-%m-%dT%H:%M:%S")
license: gpl-3.0
hash: -4,360,253,035,946,446,300
line_mean: 29.407407
line_max: 77
alpha_frac: 0.575868
autogenerated: false

repo_name: JioCloud/oslo.messaging
path: oslo/messaging/opts.py
copies: 1
size: 2535
content:
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'list_opts'
]

import copy
import itertools

from oslo.messaging._drivers import amqp
from oslo.messaging._drivers import common as drivers_common
from oslo.messaging._drivers import impl_qpid
from oslo.messaging._drivers import impl_rabbit
from oslo.messaging._drivers import impl_zmq
from oslo.messaging._drivers import matchmaker
from oslo.messaging._drivers import matchmaker_redis
from oslo.messaging._drivers import matchmaker_ring
from oslo.messaging._executors import impl_eventlet
from oslo.messaging.notify import notifier
from oslo.messaging.rpc import client
from oslo.messaging import transport

_global_opt_lists = [
    amqp.amqp_opts,
    drivers_common._exception_opts,
    impl_qpid.qpid_opts,
    impl_rabbit.rabbit_opts,
    impl_zmq.zmq_opts,
    matchmaker.matchmaker_opts,
    matchmaker_redis.matchmaker_redis_opts,
    impl_eventlet._eventlet_opts,
    notifier._notifier_opts,
    client._client_opts,
    transport._transport_opts
]

_opts = [
    (None, list(itertools.chain(*_global_opt_lists))),
    ('matchmaker_ring', matchmaker_ring.matchmaker_opts),
]


def list_opts():
    """Return a list of oslo.config options available in the library.

    The returned list includes all oslo.config options which may be registered
    at runtime by the library.

    Each element of the list is a tuple. The first element is the name of the
    group under which the list of elements in the second element will be
    registered. A group name of None corresponds to the [DEFAULT] group in
    config files.

    This function is also discoverable via the 'oslo.messaging' entry point
    under the 'oslo.config.opts' namespace.

    The purpose of this is to allow tools like the Oslo sample config file
    generator to discover the options exposed to users by this library.

    :returns: a list of (group_name, opts) tuples
    """
    return [(g, copy.deepcopy(o)) for g, o in _opts]
license: apache-2.0
hash: -180,154,284,497,711,870
line_mean: 32.8
line_max: 78
alpha_frac: 0.7357
autogenerated: false

repo_name: ubuntunux/JumpJump
path: PyInterpreter/Tutorial.py
copies: 1
size: 6321
content:
import Utility as Util from Utility import * from Constants import * from collections import OrderedDict import browser #---------------------# # CLASS : Tutorial layout class #---------------------# class TutorialLayout: def __init__(self, ui): self.ui = ui self.tutorialMap = OrderedDict({}) layout_height = 0 self.screen = Screen(name=szTutorial) # screen menu layout self.screenMenuLayout = BoxLayout(orientation="horizontal", size_hint=(1, None), height="35dp") btn_console = Button(text="Console", background_color=[1.5,0.8,0.8,2]) btn_editor = Button(text="Code Editor", background_color=[0.8,1.5,0.8,2]) btn_tutorial = Button(text="Python Tutorial", background_color=[0.8,0.8,1.5,2]) btn_console.bind(on_release=lambda inst:self.ui.setMode(szConsole)) btn_editor.bind(on_release=lambda inst:self.ui.setMode(szEditor)) self.screenMenuLayout.add_widget(btn_console) self.screenMenuLayout.add_widget(btn_editor) self.screenMenuLayout.add_widget(btn_tutorial) self.screen.add_widget(self.screenMenuLayout) self.tutorialSV = ScrollView(size_hint=(1, None), size=(W,H - self.screenMenuLayout.size[1]), pos=(0, self.screenMenuLayout.top)) with self.tutorialSV.canvas.before: Color(0.1, 0.1, 0.2, 1) Rectangle(size=WH) self.tutorialLayout = BoxLayout(orientation="vertical", size_hint_y = None) self.tutorialSV.add_widget(self.tutorialLayout) self.screen.add_widget(self.tutorialSV) # add title header image = Image(source=pythonLogo, allow_stretch=True, keep_ratio=True, size_hint_x=None) label_Title = Label(text = "Python Tutorials", font_size="30dp", bold=True, color=[1.0,0.7,0.4,1]) titleLayout = BoxLayout(orientation="horizontal", padding=[metrics.dp(20),0,0,0], size_hint=(1, 50.0/30.0)) titleLayout.add_widget(image) titleLayout.add_widget(label_Title) self.tutorialLayout.add_widget(titleLayout) layout_height += metrics.dp(50) # add python tutorial url url = "https://docs.python.org/2.7/tutorial" btn_url = Button(text="Excepts from {}".format(url), font_size="13dp") btn_url.bind(on_release=lambda inst:browser.open_url(url)) self.tutorialLayout.add_widget(btn_url) layout_height += metrics.dp(30) # add my comment self.tutorialLayout.add_widget(Label(text="I will update more tutorial.", font_size="15dp", color=[1.0,0.85,0.7,1])) self.tutorialLayout.add_widget(Label(text=" ", font_size="12dp")) layout_height += metrics.dp(50) # create tutorial buttons fileList = {} for dirpath, dirnames, filenames in os.walk(tutorialDir): filenames.sort() fileList[dirpath] = filenames keys = fileList.keys() keys.sort() for dirpath in fileList: # add category label if fileList[dirpath]: label_category = Label(text = os.path.split(dirpath)[-1], font_size="18dp", halign="left", bold=True, color=[1.0,0.85,0.7,1], size_hint_y=40.0/30.0) self.tutorialLayout.add_widget(label_category) layout_height += metrics.dp(40) # add tutorials for filename in fileList[dirpath]: # load tutorial file f = open(os.path.join(dirpath, filename), "r") lines = list(f) f.close() desc = "".join(lines) # add a button btn = Button(text=desc[:desc.find("\n")], font_size="15dp", size_hint_y=1, background_color=[0.8, 0.8, 1.5, 1]) btn.bind(on_release = self.chooseTutorial) self.tutorialMap[btn] = desc self.tutorialLayout.add_widget(btn) layout_height += metrics.dp(30) # refresh height self.tutorialLayout.height = layout_height def chooseTutorial(self, btn): if btn in self.tutorialMap: self.ui.clearOutput() desc = self.tutorialMap[btn] self.ui.displayText("\n-------------------------", 1) # split desc by line lines = desc.split("\n") # show title if lines: 
self.ui.displayText("Tutorial : " + lines.pop(0), 1) # show tutorial body textList = [] isInCode = False for line in lines: if line.startswith("[code]"): self.ui.displayText("\n".join(textList), 1) textList = [] elif line.startswith("[/code]"): self.ui.displayText("\n".join(textList), 1, background_color=(0.5, 0.5, 1, 0.35)) textList = [] else: textList.append(line) else: if isInCode: self.ui.displayText("\n".join(textList), 1, background_color=(0.5, 0.5, 1, 0.35)) else: self.ui.displayText("\n".join(textList), 1) # end of tutorial body self.ui.displayText("------------------------\n\nLet's try this!!\n", 1) # next, prev tutorial buttons padding = kivy.metrics.dp(20) fontSize = kivy.metrics.dp(14) spacing = kivy.metrics.dp(20) layout = BoxLayout(size_hint=(1,None), height="70dp", spacing=spacing, padding=[0, padding, 0, padding]) maxCharacter = int(math.ceil((W-spacing) / fontSize)) - 2 buttons = self.tutorialMap.keys() curIndex = buttons.index(btn) btnColor = [0.8, 0.8, 1.5, 1] btn_prev = Button(text="----", font_size=fontSize, background_color=btnColor) btn_next = Button(text="----", font_size=fontSize, background_color=btnColor) if curIndex > 0: btn_prevTutorial = buttons[curIndex - 1] if len(btn_prevTutorial.text) >= maxCharacter: btn_prev.text = btn_prevTutorial.text[:maxCharacter-3] + "..." else: btn_prev.text = btn_prevTutorial.text btn_prev.bind(on_release = lambda inst:self.chooseTutorial(btn_prevTutorial)) if curIndex < len(buttons) - 1: btn_nextTutorial = buttons[curIndex + 1] if len(btn_nextTutorial.text) >= maxCharacter: btn_next.text = btn_nextTutorial.text[:maxCharacter-3] + "..." else: btn_next.text = btn_nextTutorial.text btn_next.bind(on_release = lambda inst:self.chooseTutorial(btn_nextTutorial)) layout.add_widget(btn_prev) layout.add_widget(btn_next) self.ui.outputLayout_add_widget(layout) self.ui.setMode(szConsole) def touchPrev(self): self.ui.setMode(szConsole)
license: gpl-3.0
hash: -1,433,163,130,658,457,600
line_mean: 40.592105
line_max: 156
alpha_frac: 0.626325
autogenerated: false

repo_name: kerwinxu/barcodeManager
path: zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Node/Alias.py
copies: 1
size: 4400
content:
"""scons.Node.Alias Alias nodes. This creates a hash of global Aliases (dummy targets). """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Node/Alias.py 5023 2010/06/14 22:05:46 scons" import collections import SCons.Errors import SCons.Node import SCons.Util class AliasNameSpace(collections.UserDict): def Alias(self, name, **kw): if isinstance(name, SCons.Node.Alias.Alias): return name try: a = self[name] except KeyError: a = SCons.Node.Alias.Alias(name, **kw) self[name] = a return a def lookup(self, name, **kw): try: return self[name] except KeyError: return None class AliasNodeInfo(SCons.Node.NodeInfoBase): current_version_id = 1 field_list = ['csig'] def str_to_node(self, s): return default_ans.Alias(s) class AliasBuildInfo(SCons.Node.BuildInfoBase): current_version_id = 1 class Alias(SCons.Node.Node): NodeInfo = AliasNodeInfo BuildInfo = AliasBuildInfo def __init__(self, name): SCons.Node.Node.__init__(self) self.name = name def str_for_display(self): return '"' + self.__str__() + '"' def __str__(self): return self.name def make_ready(self): self.get_csig() really_build = SCons.Node.Node.build is_up_to_date = SCons.Node.Node.children_are_up_to_date def is_under(self, dir): # Make Alias nodes get built regardless of # what directory scons was run from. Alias nodes # are outside the filesystem: return 1 def get_contents(self): """The contents of an alias is the concatenation of the content signatures of all its sources.""" childsigs = [n.get_csig() for n in self.children()] return ''.join(childsigs) def sconsign(self): """An Alias is not recorded in .sconsign files""" pass # # # def changed_since_last_build(self, target, prev_ni): cur_csig = self.get_csig() try: return cur_csig != prev_ni.csig except AttributeError: return 1 def build(self): """A "builder" for aliases.""" pass def convert(self): try: del self.builder except AttributeError: pass self.reset_executor() self.build = self.really_build def get_csig(self): """ Generate a node's content signature, the digested signature of its content. 
node - the node cache - alternate node to use for the signature cache returns - the content signature """ try: return self.ninfo.csig except AttributeError: pass contents = self.get_contents() csig = SCons.Util.MD5signature(contents) self.get_ninfo().csig = csig return csig default_ans = AliasNameSpace() SCons.Node.arg2nodes_lookups.append(default_ans.lookup) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
license: bsd-2-clause
hash: 1,780,583,719,408,763,100
line_mean: 26.947368
line_max: 95
alpha_frac: 0.623409
autogenerated: false

repo_name: zubinshah/algorithms
path: data-structures/python/sorting.py
copies: 1
size: 11069
content:
#!/usr/local/bin/python """ SORTING ALGORITHMS + Bubble Sort + Selection Sort This program will test *num* entries as an unsorted array, and sort them as well as log the time taken to understand the performances of varous sorting algorithms. """ #****************************************************************************** import random import sys import time debug = False #****************************************************************************** def sel_sort(data): c, s = 0, 0 num = len(data) if debug is True: print "Data Length : " + str(num) for i in xrange(0, num): curr_min = i newMin = False for j in xrange(i+1, num): c += 1 if data[j] < data[curr_min]: curr_min = j newMin = True if newMin is True: s += 1 data[i] , data[curr_min] = data[curr_min], data[i] return [c, s] #****************************************************************************** #****************************************************************************** def bubble_sort(data): c, s = 0, 0 num = len(data) for i in range(0, num): swap = False if debug is True: print "\niteration " + str(i) for j in range(0, num-i-1): c = c + 1 if data[j+1] < data[j]: data[j], data[j+1] = data[j+1], data[j] s = s + 1 swap = True if swap is False: break return [c, s] #****************************************************************************** #****************************************************************************** def recursive_bubble_sort(data, n): if n == 1: return for i in range(0, n-1): if data[i] > data[i+1]: data[i], data[i+1] = data[i+1], data[i] recursive_bubble_sort(data, n-1) #****************************************************************************** def quick_sort_partition (data, lo, hi, c): pivot = hi partition = lo for i in xrange(lo, hi): c[0] += 1 if data[i] < data[pivot]: c[1] += 1 data[i], data[partition] = data[partition], data[i] partition = partition + 1 c[1] += 1 data[partition], data[pivot] = data[pivot], data[partition] return partition def quick_sort (data, lo, hi, c=[0,0]): if lo < hi : p = quick_sort_partition (data, lo, hi, c) quick_sort(data, lo, p-1, c) quick_sort(data, p+1, hi, c) return c #****************************************************************************** #****************************************************************************** ## implement in-place merge routine for merge_sort def merge_inplace_merge(data, low, mid, high, c): i = low j = mid while i < j and j <= high: c[0] += 1 if data[i] < data[j]: i = i + 1 else: c[1]+=1 data[i], data[j] = data[j], data[i] i = i + 1 if i == j: j = j + 1 i = low #middle = mid #for j in xrange(mid, high+1): # for i in xrange(low, middle): # if middle <= high: # if data[i] > data[j]: # print "swap", i, data[i], j, data[j], middle # data[i], data[j] = data[j], data[i] # middle = middle + 1 #return ## Implementing in-place merge sort. Space optimized. 
def merge_sort (data, lo, hi, c=[0,0]): if (lo < hi): mid = (lo + hi + 1) / 2 merge_sort (data, lo, mid-1, c) merge_sort (data, mid, hi, c) merge_inplace_merge(data, lo, mid, hi, c) return c #****************************************************************************** #****************************************************************************** def insertion_sort (data): c, s = 0, 0 for i in xrange(1, len(data)): #insert elem data[i] at right location and shuffling s += 1 item = data[i] j = i while j > 0: #scan back from j by shifting c+=1 if data[j-1] > item: s+=1 data[j] = data[j-1] j -= 1 #location not found, keep shuffling else: data[j] = item #copy item at right location break if j == 0: data[j] = item return [c, s] #****************************************************************************** def heapsort_heapify(data, num, i, c): largest = i left = 2*i + 1 right = 2*i + 2 if left < num and data[left] > data[largest]: largest = left if right < num and data[right] > data[largest]: largest = right c[0] += 2 #two comparisions done above if i is not largest: data[i], data[largest] = data[largest], data[i] c[1] += 1 #one comparision done above heapsort_heapify(data, num, largest, c) return c def heapsort (data): num = len(data) c = [0, 0] # build a MAXHEAP on the data set in place # assuming the array is a binary heap, but needs heapification # rightmost node in the second-last level for i in xrange (num/2 - 1,-1, -1): heapsort_heapify(data, num, i, c) # make a max heap out of the array # pick the max everytime and shift with the rightmost for i in xrange(num-1, -1, -1): data[i], data[0] = data[0], data[i] c[1] += 1 heapsort_heapify(data, i, 0, c) return c #****************************************************************************** #****************************************************************************** def main (num): ########################################################################### output = {} # Key : 'name of sorting algorithm' # Values : List of [time , [comparisions, swaps], description print "SORTING " + str(num) + " ENTRIES\n" ########################################################################### print "HEAP SORT" data = [int(random.random()*100) for i in xrange(num)] t1 = time.time() c = heapsort(data) t2 = time.time() output['heapsort_unsorted'] = [t2-t1, c, "heapsort on unsorted data"] ########## t1 = time.time() c = heapsort(data) t2 = time.time() output['heapsort_sorted'] = [t2-t1, c, "heapsort on sorted data"] ########## data.reverse() t1 = time.time() c = heapsort(data) t2 = time.time() output['heapsort_worstcase'] = [t2-t1, c, "heapsort on reverse sorted data"] ########################################################################### print "BUBBLE SORT" data = [int(random.random()*100) for i in xrange(num)] t1 = time.time() c = bubble_sort(data) t2 = time.time() output['bubble_unsorted'] = [t2-t1, c, "bubble sort on unsorted data"] ########## t1 = time.time() c = bubble_sort(data) t2 = time.time() output['bubble_sorted'] = [t2-t1, c, "bubble sort on sorted data"] ########## data.reverse() t1 = time.time() c = bubble_sort(data) t2 = time.time() output['bubble_worstcase'] = [t2-t1, c, "bubble sort on reverse sorted data"] ########################################################################### #print "RECURSIVE BUBBLE SORT" #data = [int(random.random()*100) for i in xrange(num)] #t = time.time() #recursive_bubble_sort(data, len(data)) #print " Using recur bubble sort time taken .. 
" + str(time.time() - t) #t = time.time() #recursive_bubble_sort(data, len(data)) #print " Using recur bubble sort time taken (on sorted array).. " + str(time.time() - t) #data.reverse() #t = time.time() #recursive_bubble_sort(data, len(data)) #print " Using recur bubble sort time taken (worst case reverse sorted array).. " + str(time.time() - t) ########################################################################### print "SELECTION SORT" data = [int(random.random()*100) for i in xrange(num)] t1 = time.time() c = sel_sort(data) t2 = time.time() output['selection_unsorted'] = [t2-t1, c, "selection sort on unsorted data"] ########## t1 = time.time() c = sel_sort(data) t2 = time.time() output['selection_sorted'] = [t2-t1, c, "selection sort on sorted data"] ########## data.reverse() t1 = time.time() c = sel_sort(data) t2 = time.time() output['selection_worstcase'] = [t2-t1, c, "selection sort on reverse sorted data"] ########################################################################### print "QUICK SORT" data = [int(random.random()*100) for i in xrange(num)] t1 = time.time() c = quick_sort(data, 0, len(data) - 1) t2 = time.time() output['quick_unsorted'] = [t2-t1, c, "quick sort on unsorted data"] t1 = time.time() c = quick_sort(data, 0, len(data) - 1) t2 = time.time() output['quick_sorted'] = [t2-t1, c, "quick sort on unsorted data"] data.reverse() t1 = time.time() c = quick_sort(data, 0, len(data) - 1) t2 = time.time() output['quick_worstcase'] = [t2-t1, c, "quick sort on reverse sorted data"] ########################################################################### print "MERGE SORT" data = [int(random.random()*100) for i in xrange(num)] t1 = time.time() merge_sort(data, 0, len(data) - 1) t2 = time.time() output['mergesort_unsorted'] = [t2-t1, c, "mergesort on unsorted data"] t1 = time.time() merge_sort(data, 0, len(data) -1) t2 = time.time() output['mergesort_sorted'] = [t2-t1, c, "mergesort on sorted data"] data.reverse() t1 = time.time() merge_sort(data, 0, len(data)-1) t2 = time.time() output['mergesort_worstcase'] = [t2-t1, c, "mergesort on reverse sorted data"] ########################################################################### print "INSERTION SORT" data = [int(random.random()*100) for i in xrange(num)] t1 = time.time() c = insertion_sort(data) t2 = time.time() output['insertionsort_unsorted'] = [t2-t1, c, "insertion sort on unsorted data"] t1 = time.time() c = insertion_sort(data) t2 = time.time() output['insertionsort_sorted'] = [t2-t1, c, "insertion sort on sorted data"] data.reverse() t1 = time.time() c = insertion_sort(data) t2 = time.time() output['insertionsort_worstcase'] = [t2-t1, c, "insertion sort on reverse sorted data"] ########################################################################### #TBD : format output print output return #****************************************************************************** #****************************************************************************** main(1000) #******************************************************************************
license: apache-2.0
hash: 6,825,287,574,014,931,000
line_mean: 32.340361
line_max: 109
alpha_frac: 0.437348
autogenerated: false

repo_name: hackfestca/cnb
path: cnb/modAvailable/CNBMMEncode.py
copies: 1
size: 5840
content:
#!/usr/bin/python # -*- coding: utf-8 -*- ''' CNB Matrix Module - encode ''' import string import urllib import base64 from cnb.cnbMatrixModule import CNBMatrixModule class CNBMMEncode(CNBMatrixModule): """ """ name = 'encode' usage = '' desc = 'Encode a string using different algorithm' aliases = [] isAdmin = False def __init__(self,log): CNBMatrixModule.__init__(self,log) self._initOptParser() def _initOptParser(self): CNBMatrixModule._initOptParser(self,False) encodeGrp = self.parser.add_argument_group('Encoding Options') decodeGrp = self.parser.add_argument_group('Decoding Options') encodeGrp.add_argument('--rot13', action='store_true', dest='rot13', default=False,\ help='Encode in rot13') encodeGrp.add_argument('--rotn', action='store_true', dest='rotn', default=False,\ help='Encode in rotN (need to specify -n [0-26])') encodeGrp.add_argument('--rotall', action='store_true', dest='rotall', default=False,\ help='Encode in all possible rotN (multiple output)') encodeGrp.add_argument('--b64', action='store_true', dest='b64', default=False,\ help='Encode in base64') encodeGrp.add_argument('--morse', action='store_true', dest='morse', default=False,\ help='Encode in morse') encodeGrp.add_argument('--url', action='store_true', dest='url', default=False,\ help='Encode in URL') decodeGrp.add_argument('--ub64', action='store_true', dest='ub64', default=False,\ help='Decode string from base64') decodeGrp.add_argument('--umorse', action='store_true', dest='umorse', default=False,\ help='Decode string from morse') decodeGrp.add_argument('--uurl', action='store_true', dest='uurl', default=False,\ help='Decode string from URL') self.parser.add_argument("-h", "--help", action="store_true", dest='help', default=False,\ help='Display help') self.parser.add_argument("-n", action="store", dest='n', type=int, default=0,\ help='Set a rotation iterator (for --rotn only)', nargs=1) self.parser.add_argument('string', metavar='STRING', action='store', default='',\ help='Text to encode or Cipher to decode', nargs='*') def _rotN(self, s, n): lc = string.lowercase trans = string.maketrans(lc, lc[n:] + lc[:n]) return string.translate(s, trans) def __del__(self): pass def processCmd(self, oMsg): result = 'Missing arguments, check help' (args, err) = self.getParsedArgs(oMsg.args) if args.string != '': s = ' '.join(args.string) else: s = '' if err != '': result = err elif args.rot13: if s != '': result = s.encode('rot13') elif args.rotn: if s != '' and args.n >= 0: result = self._rotN(s,args.n) elif args.rotall: if s != '': result = '' for i in range(1,26): result = result + self._rotN(s,i) + "\n" elif args.b64: if s != '': result = base64.b64encode(s) elif args.morse: if s != '': result = MorseEncoder().encode(s) elif args.url: if s != '': result = urllib.quote(s) elif args.ub64: if s != '': result = base64.b64decode(s) elif args.umorse: if s != '': result = MorseEncoder().decode(s) elif args.uurl: if s != '': result = urllib.unquote(s) elif args.help: result = self.getUsage() else: result = self.getUsage() return result class MorseEncoder(): morseAlphabet ={ "A" : ".-", "B" : "-...", "C" : "-.-.", "D" : "-..", "E" : ".", "F" : "..-.", "G" : "--.", "H" : "....", "I" : "..", "J" : ".---", "K" : "-.-", "L" : ".-..", "M" : "--", "N" : "-.", "O" : "---", "P" : ".--.", "Q" : "--.-", "R" : ".-.", "S" : "...", "T" : "-", "U" : "..-", "V" : "...-", "W" : ".--", "X" : "-..-", "Y" : "-.--", "Z" : "--..", " " : "/", "." 
: "/" } def __init__(self): self.inverseMorseAlphabet = dict((v,k) for (k,v) in self.morseAlphabet.items()) def decode(self, code, positionInString = 0): """ parse a morse code string positionInString is the starting point for decoding """ if positionInString < len(code): morseLetter = "" for key,char in enumerate(code[positionInString:]): if char == " ": positionInString = key + positionInString + 1 letter = self.inverseMorseAlphabet[morseLetter] return letter + self.decode(code, positionInString) else: morseLetter += char else: return "" def encode(self,message): """ encode a message in morse code, spaces between words are represented by '/' """ encodedMessage = "" for char in message[:]: if char.upper() in self.morseAlphabet: encodedMessage += self.morseAlphabet[char.upper()] + " " return encodedMessage
license: gpl-3.0
hash: -2,542,247,868,136,886,000
line_mean: 31.087912
line_max: 98
alpha_frac: 0.474315
autogenerated: false

repo_name: drnextgis/QGIS
path: tests/src/python/test_qgsrelationeditwidget.py
copies: 1
size: 12071
content:
# -*- coding: utf-8 -*- """QGIS Unit tests for edit widgets. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Matthias Kuhn' __date__ = '28/11/2015' __copyright__ = 'Copyright 2015, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA import os from qgis.core import ( QgsFeature, QgsVectorLayer, QgsProject, QgsRelation, QgsMapLayerRegistry, QgsTransaction, QgsFeatureRequest, QgsVectorLayerTools ) from qgis.gui import ( QgsEditorWidgetRegistry, QgsRelationWidgetWrapper, QgsAttributeEditorContext ) from qgis.PyQt.QtCore import QTimer from qgis.PyQt.QtWidgets import QToolButton, QTableView, QApplication from qgis.testing import start_app, unittest start_app() class TestQgsRelationEditWidget(unittest.TestCase): @classmethod def setUpClass(cls): """ Setup the involved layers and relations for a n:m relation :return: """ QgsEditorWidgetRegistry.initEditors() cls.dbconn = 'service=\'qgis_test\'' if 'QGIS_PGTEST_DB' in os.environ: cls.dbconn = os.environ['QGIS_PGTEST_DB'] # Create test layer cls.vl_b = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books" sql=', 'books', 'postgres') cls.vl_a = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."authors" sql=', 'authors', 'postgres') cls.vl_link = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books_authors" sql=', 'books_authors', 'postgres') QgsMapLayerRegistry.instance().addMapLayer(cls.vl_b) QgsMapLayerRegistry.instance().addMapLayer(cls.vl_a) QgsMapLayerRegistry.instance().addMapLayer(cls.vl_link) cls.relMgr = QgsProject.instance().relationManager() cls.rel_a = QgsRelation() cls.rel_a.setReferencingLayer(cls.vl_link.id()) cls.rel_a.setReferencedLayer(cls.vl_a.id()) cls.rel_a.addFieldPair('fk_author', 'pk') cls.rel_a.setRelationId('rel_a') assert(cls.rel_a.isValid()) cls.relMgr.addRelation(cls.rel_a) cls.rel_b = QgsRelation() cls.rel_b.setReferencingLayer(cls.vl_link.id()) cls.rel_b.setReferencedLayer(cls.vl_b.id()) cls.rel_b.addFieldPair('fk_book', 'pk') cls.rel_b.setRelationId('rel_b') assert(cls.rel_b.isValid()) cls.relMgr.addRelation(cls.rel_b) # Our mock QgsVectorLayerTools, that allow injecting data where user input is expected cls.vltools = VlTools() assert(cls.vl_a.isValid()) assert(cls.vl_b.isValid()) assert(cls.vl_link.isValid()) def setUp(self): self.startTransaction() def tearDown(self): self.rollbackTransaction() def test_delete_feature(self): """ Check if a feature can be deleted properly """ self.createWrapper(self.vl_a, '"name"=\'Erich Gamma\'') self.assertEqual(self.table_view.model().rowCount(), 1) self.assertEqual(1, len([f for f in self.vl_b.getFeatures()])) fid = next(self.vl_b.getFeatures(QgsFeatureRequest().setFilterExpression('"name"=\'Design Patterns. 
Elements of Reusable Object-Oriented Software\''))).id() self.widget.featureSelectionManager().select([fid]) btn = self.widget.findChild(QToolButton, 'mDeleteFeatureButton') btn.click() # This is the important check that the feature is deleted self.assertEqual(0, len([f for f in self.vl_b.getFeatures()])) # This is actually more checking that the database on delete action is properly set on the relation self.assertEqual(0, len([f for f in self.vl_link.getFeatures()])) self.assertEqual(self.table_view.model().rowCount(), 0) def test_list(self): """ Simple check if several related items are shown """ wrapper = self.createWrapper(self.vl_b) # NOQA self.assertEqual(self.table_view.model().rowCount(), 4) @unittest.expectedFailure(os.environ.get('QT_VERSION', '5') == '4' and os.environ.get('TRAVIS_OS_NAME', '') == 'linux') # It's probably not related to this variables at all, but that's the closest we can get to the real source of this problem at the moment... def test_add_feature(self): """ Check if a new related feature is added """ self.createWrapper(self.vl_a, '"name"=\'Douglas Adams\'') self.assertEqual(self.table_view.model().rowCount(), 0) self.vltools.setValues([None, 'The Hitchhiker\'s Guide to the Galaxy']) btn = self.widget.findChild(QToolButton, 'mAddFeatureButton') btn.click() # Book entry has been created self.assertEqual(2, len([f for f in self.vl_b.getFeatures()])) # Link entry has been created self.assertEqual(5, len([f for f in self.vl_link.getFeatures()])) self.assertEqual(self.table_view.model().rowCount(), 1) def test_link_feature(self): """ Check if an existing feature can be linked """ wrapper = self.createWrapper(self.vl_a, '"name"=\'Douglas Adams\'') # NOQA f = QgsFeature(self.vl_b.fields()) f.setAttributes([self.vl_b.dataProvider().defaultValueClause(0), 'The Hitchhiker\'s Guide to the Galaxy']) self.vl_b.addFeature(f) def choose_linked_feature(): dlg = QApplication.activeModalWidget() dlg.setSelectedFeatures([f.id()]) dlg.accept() btn = self.widget.findChild(QToolButton, 'mLinkFeatureButton') timer = QTimer() timer.setSingleShot(True) timer.setInterval(0) # will run in the event loop as soon as it's processed when the dialog is opened timer.timeout.connect(choose_linked_feature) timer.start() btn.click() # magically the above code selects the feature here... 
link_feature = next(self.vl_link.getFeatures(QgsFeatureRequest().setFilterExpression('"fk_book"={}'.format(f[0])))) self.assertIsNotNone(link_feature[0]) self.assertEqual(self.table_view.model().rowCount(), 1) def test_unlink_feature(self): """ Check if a linked feature can be unlinked """ wrapper = self.createWrapper(self.vl_b) # All authors are listed self.assertEqual(self.table_view.model().rowCount(), 4) it = self.vl_a.getFeatures( QgsFeatureRequest().setFilterExpression('"name" IN (\'Richard Helm\', \'Ralph Johnson\')')) self.widget.featureSelectionManager().select([f.id() for f in it]) self.assertEqual(2, self.widget.featureSelectionManager().selectedFeatureCount()) btn = self.widget.findChild(QToolButton, 'mUnlinkFeatureButton') btn.click() # This is actually more checking that the database on delete action is properly set on the relation self.assertEqual(2, len([f for f in self.vl_link.getFeatures()])) self.assertEqual(2, self.table_view.model().rowCount()) def test_discover_relations(self): """ Test the automatic discovery of relations """ relations = self.relMgr.discoverRelations([], [self.vl_a, self.vl_b, self.vl_link]) relations = {r.name(): r for r in relations} self.assertEqual({'books_authors_fk_book_fkey', 'books_authors_fk_author_fkey'}, set(relations.keys())) ba2b = relations['books_authors_fk_book_fkey'] self.assertTrue(ba2b.isValid()) self.assertEqual('books_authors', ba2b.referencingLayer().name()) self.assertEqual('books', ba2b.referencedLayer().name()) self.assertEqual([0], ba2b.referencingFields()) self.assertEqual([0], ba2b.referencedFields()) ba2a = relations['books_authors_fk_author_fkey'] self.assertTrue(ba2a.isValid()) self.assertEqual('books_authors', ba2a.referencingLayer().name()) self.assertEqual('authors', ba2a.referencedLayer().name()) self.assertEqual([1], ba2a.referencingFields()) self.assertEqual([0], ba2a.referencedFields()) self.assertEqual([], self.relMgr.discoverRelations([self.rel_a, self.rel_b], [self.vl_a, self.vl_b, self.vl_link])) self.assertEqual(1, len(self.relMgr.discoverRelations([], [self.vl_a, self.vl_link]))) def startTransaction(self): """ Start a new transaction and set all layers into transaction mode. :return: None """ lyrs = [self.vl_a, self.vl_b, self.vl_link] self.transaction = QgsTransaction.create([l.id() for l in lyrs]) self.transaction.begin() for l in lyrs: l.startEditing() def rollbackTransaction(self): """ Rollback all changes done in this transaction. We always rollback and never commit to have the database in a pristine state at the end of each test. :return: None """ lyrs = [self.vl_a, self.vl_b, self.vl_link] for l in lyrs: l.commitChanges() self.transaction.rollback() def createWrapper(self, layer, filter=None): """ Basic setup of a relation widget wrapper. Will create a new wrapper and set its feature to the one and only book in the table. 
It will also assign some instance variables to help * self.widget The created widget * self.table_view The table view of the widget :return: The created wrapper """ if layer == self.vl_b: relation = self.rel_b nmrel = self.rel_a else: relation = self.rel_a nmrel = self.rel_b self.wrapper = QgsRelationWidgetWrapper(layer, relation) self.wrapper.setConfig({'nm-rel': nmrel.id()}) context = QgsAttributeEditorContext() context.setVectorLayerTools(self.vltools) self.wrapper.setContext(context) self.widget = self.wrapper.widget() self.widget.show() request = QgsFeatureRequest() if filter: request.setFilterExpression(filter) book = next(layer.getFeatures(request)) self.wrapper.setFeature(book) self.table_view = self.widget.findChild(QTableView) return self.wrapper class VlTools(QgsVectorLayerTools): """ Mock the QgsVectorLayerTools Since we don't have a user on the test server to input this data for us, we can just use this. """ def setValues(self, values): """ Set the values for the next feature to insert :param values: An array of values that shall be used for the next inserted record :return: None """ self.values = values def addFeature(self, layer, defaultValues, defaultGeometry): """ Overrides the addFeature method :param layer: vector layer :param defaultValues: some default values that may be provided by QGIS :param defaultGeometry: a default geometry that may be provided by QGIS :return: tuple(ok, f) where ok is if the layer added the feature and f is the added feature """ values = list() for i, v in enumerate(self.values): if v: values.append(v) else: values.append(layer.dataProvider().defaultValueClause(i)) f = QgsFeature(layer.fields()) f.setAttributes(self.values) f.setGeometry(defaultGeometry) ok = layer.addFeature(f) return ok, f def startEditing(self, layer): pass def stopEditing(self, layer, allowCancel): pass def saveEdits(self, layer): pass if __name__ == '__main__': unittest.main()
license: gpl-2.0
hash: -7,272,650,242,732,273,000
line_mean: 34.60767
line_max: 263
alpha_frac: 0.634247
autogenerated: false

repo_name: tseaver/gcloud-python
path: pubsub/google/cloud/pubsub_v1/gapic/publisher_client.py
copies: 1
size: 40338
content:
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accesses the google.pubsub.v1 Publisher API.""" import functools import pkg_resources import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.path_template import google.api_core.grpc_helpers import google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.pubsub_v1.gapic import publisher_client_config from google.cloud.pubsub_v1.gapic.transports import publisher_grpc_transport from google.cloud.pubsub_v1.proto import pubsub_pb2 from google.cloud.pubsub_v1.proto import pubsub_pb2_grpc from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( 'google-cloud-pubsub', ).version class PublisherClient(object): """ The service that an application uses to manipulate topics, and to send messages to a topic. """ SERVICE_ADDRESS = 'pubsub.googleapis.com:443' """The default address of the service.""" # The scopes needed to make gRPC calls to all of the methods defined in # this service _DEFAULT_SCOPES = ( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/pubsub', ) # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. _INTERFACE_NAME = 'google.pubsub.v1.Publisher' @classmethod def from_service_account_file(cls, filename, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: PublisherClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename) kwargs['credentials'] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def topic_path(cls, project, topic): """Return a fully-qualified topic string.""" return google.api_core.path_template.expand( 'projects/{project}/topics/{topic}', project=project, topic=topic, ) @classmethod def project_path(cls, project): """Return a fully-qualified project string.""" return google.api_core.path_template.expand( 'projects/{project}', project=project, ) def __init__(self, transport=None, channel=None, credentials=None, client_config=publisher_client_config.config, client_info=None): """Constructor. Args: transport (Union[~.PublisherGrpcTransport, Callable[[~.Credentials, type], ~.PublisherGrpcTransport]): A transport instance, responsible for actually making the API calls. The default transport uses the gRPC protocol. This argument may also be a callable which returns a transport instance. 
Callables will be sent the credentials as the first argument and the default transport class as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception. client_config (dict): DEPRECATED. A dictionary of call options for each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ # Raise deprecation warnings for things we want to go away. if client_config: warnings.warn('The `client_config` argument is deprecated.', PendingDeprecationWarning) if channel: warnings.warn( 'The `channel` argument is deprecated; use ' '`transport` instead.', PendingDeprecationWarning) # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=publisher_grpc_transport. PublisherGrpcTransport, ) else: if credentials: raise ValueError( 'Received both a transport instance and ' 'credentials; these are mutually exclusive.') self.transport = transport else: self.transport = publisher_grpc_transport.PublisherGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials, ) if client_info is None: client_info = ( google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config['interfaces'][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper # transport methods, wrapped with `wrap_method` to add retry, # timeout, and the like. self._inner_api_calls = {} # Service calls def create_topic(self, name, labels=None, message_storage_policy=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Creates the given topic with the given name. See the <a href=\"/pubsub/docs/admin#resource_names\"> resource name rules</a>. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> name = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> response = client.create_topic(name) Args: name (str): The name of the topic. It must have the format ``\"projects/{project}/topics/{topic}\"``. 
``{topic}`` must start with a letter, and contain only letters (``[A-Za-z]``), numbers (``[0-9]``), dashes (``-``), underscores (``_``), periods (``.``), tildes (``~``), plus (``+``) or percent signs (``%``). It must be between 3 and 255 characters in length, and it must not start with ``\"goog\"``. labels (dict[str -> str]): User labels. message_storage_policy (Union[dict, ~google.cloud.pubsub_v1.types.MessageStoragePolicy]): Policy constraining how messages published to the topic may be stored. It is determined when the topic is created based on the policy configured at the project level. It must not be set by the caller in the request to CreateTopic or to UpdateTopic. This field will be populated in the responses for GetTopic, CreateTopic, and UpdateTopic: if not present in the response, then no constraints are in effect. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.pubsub_v1.types.MessageStoragePolicy` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.Topic` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'create_topic' not in self._inner_api_calls: self._inner_api_calls[ 'create_topic'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_topic, default_retry=self._method_configs['CreateTopic'].retry, default_timeout=self._method_configs['CreateTopic']. timeout, client_info=self._client_info, ) request = pubsub_pb2.Topic( name=name, labels=labels, message_storage_policy=message_storage_policy, ) return self._inner_api_calls['create_topic']( request, retry=retry, timeout=timeout, metadata=metadata) def update_topic(self, topic, update_mask, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Updates an existing topic. Note that certain properties of a topic are not modifiable. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> # TODO: Initialize ``topic``: >>> topic = {} >>> >>> # TODO: Initialize ``update_mask``: >>> update_mask = {} >>> >>> response = client.update_topic(topic, update_mask) Args: topic (Union[dict, ~google.cloud.pubsub_v1.types.Topic]): The updated topic object. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.pubsub_v1.types.Topic` update_mask (Union[dict, ~google.cloud.pubsub_v1.types.FieldMask]): Indicates which fields in the provided topic to update. Must be specified and non-empty. Note that if ``update_mask`` contains \"message_storage_policy\" then the new value will be determined based on the policy configured at the project or organization level. The ``message_storage_policy`` must not be set in the ``topic`` provided above. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.pubsub_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.Topic` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'update_topic' not in self._inner_api_calls: self._inner_api_calls[ 'update_topic'] = google.api_core.gapic_v1.method.wrap_method( self.transport.update_topic, default_retry=self._method_configs['UpdateTopic'].retry, default_timeout=self._method_configs['UpdateTopic']. timeout, client_info=self._client_info, ) request = pubsub_pb2.UpdateTopicRequest( topic=topic, update_mask=update_mask, ) return self._inner_api_calls['update_topic']( request, retry=retry, timeout=timeout, metadata=metadata) def publish(self, topic, messages, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Adds one or more messages to the topic. Returns ``NOT_FOUND`` if the topic does not exist. The message payload must not be empty; it must contain either a non-empty data field, or at least one attribute. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> topic = client.topic_path('[PROJECT]', '[TOPIC]') >>> data = b'' >>> messages_element = {'data': data} >>> messages = [messages_element] >>> >>> response = client.publish(topic, messages) Args: topic (str): The messages in the request will be published on this topic. Format is ``projects/{project}/topics/{topic}``. messages (list[Union[dict, ~google.cloud.pubsub_v1.types.PubsubMessage]]): The messages to publish. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.pubsub_v1.types.PubsubMessage` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.PublishResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if 'publish' not in self._inner_api_calls: self._inner_api_calls[ 'publish'] = google.api_core.gapic_v1.method.wrap_method( self.transport.publish, default_retry=self._method_configs['Publish'].retry, default_timeout=self._method_configs['Publish'].timeout, client_info=self._client_info, ) request = pubsub_pb2.PublishRequest( topic=topic, messages=messages, ) return self._inner_api_calls['publish']( request, retry=retry, timeout=timeout, metadata=metadata) def get_topic(self, topic, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Gets the configuration of a topic. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> topic = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> response = client.get_topic(topic) Args: topic (str): The name of the topic to get. Format is ``projects/{project}/topics/{topic}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.Topic` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'get_topic' not in self._inner_api_calls: self._inner_api_calls[ 'get_topic'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_topic, default_retry=self._method_configs['GetTopic'].retry, default_timeout=self._method_configs['GetTopic'].timeout, client_info=self._client_info, ) request = pubsub_pb2.GetTopicRequest(topic=topic, ) return self._inner_api_calls['get_topic']( request, retry=retry, timeout=timeout, metadata=metadata) def list_topics(self, project, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Lists matching topics. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> project = client.project_path('[PROJECT]') >>> >>> # Iterate over all results >>> for element in client.list_topics(project): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_topics(project, options=CallOptions(page_token=INITIAL_PAGE)): ... for element in page: ... # process element ... pass Args: project (str): The name of the cloud project that topics belong to. Format is ``projects/{project}``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. 
Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.pubsub_v1.types.Topic` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'list_topics' not in self._inner_api_calls: self._inner_api_calls[ 'list_topics'] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_topics, default_retry=self._method_configs['ListTopics'].retry, default_timeout=self._method_configs['ListTopics'].timeout, client_info=self._client_info, ) request = pubsub_pb2.ListTopicsRequest( project=project, page_size=page_size, ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls['list_topics'], retry=retry, timeout=timeout, metadata=metadata), request=request, items_field='topics', request_token_field='page_token', response_token_field='next_page_token', ) return iterator def list_topic_subscriptions( self, topic, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Lists the names of the subscriptions on this topic. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> topic = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> # Iterate over all results >>> for element in client.list_topic_subscriptions(topic): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_topic_subscriptions(topic, options=CallOptions(page_token=INITIAL_PAGE)): ... for element in page: ... # process element ... pass Args: topic (str): The name of the topic that subscriptions are attached to. Format is ``projects/{project}/topics/{topic}``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`str` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. if 'list_topic_subscriptions' not in self._inner_api_calls: self._inner_api_calls[ 'list_topic_subscriptions'] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_topic_subscriptions, default_retry=self. _method_configs['ListTopicSubscriptions'].retry, default_timeout=self. _method_configs['ListTopicSubscriptions'].timeout, client_info=self._client_info, ) request = pubsub_pb2.ListTopicSubscriptionsRequest( topic=topic, page_size=page_size, ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls['list_topic_subscriptions'], retry=retry, timeout=timeout, metadata=metadata), request=request, items_field='subscriptions', request_token_field='page_token', response_token_field='next_page_token', ) return iterator def delete_topic(self, topic, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Deletes the topic with the given name. Returns ``NOT_FOUND`` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted, but their ``topic`` field is set to ``_deleted-topic_``. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> topic = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> client.delete_topic(topic) Args: topic (str): Name of the topic to delete. Format is ``projects/{project}/topics/{topic}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'delete_topic' not in self._inner_api_calls: self._inner_api_calls[ 'delete_topic'] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_topic, default_retry=self._method_configs['DeleteTopic'].retry, default_timeout=self._method_configs['DeleteTopic']. timeout, client_info=self._client_info, ) request = pubsub_pb2.DeleteTopicRequest(topic=topic, ) self._inner_api_calls['delete_topic']( request, retry=retry, timeout=timeout, metadata=metadata) def set_iam_policy(self, resource, policy, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Sets the access control policy on the specified resource. Replaces any existing policy. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> resource = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> # TODO: Initialize ``policy``: >>> policy = {} >>> >>> response = client.set_iam_policy(resource, policy) Args: resource (str): REQUIRED: The resource for which the policy is being specified. ``resource`` is usually specified as a path. 
For example, a Project resource is specified as ``projects/{project}``. policy (Union[dict, ~google.cloud.pubsub_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.pubsub_v1.types.Policy` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.Policy` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'set_iam_policy' not in self._inner_api_calls: self._inner_api_calls[ 'set_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( self.transport.set_iam_policy, default_retry=self._method_configs['SetIamPolicy'].retry, default_timeout=self._method_configs['SetIamPolicy']. timeout, client_info=self._client_info, ) request = iam_policy_pb2.SetIamPolicyRequest( resource=resource, policy=policy, ) return self._inner_api_calls['set_iam_policy']( request, retry=retry, timeout=timeout, metadata=metadata) def get_iam_policy(self, resource, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> resource = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> response = client.get_iam_policy(resource) Args: resource (str): REQUIRED: The resource for which the policy is being requested. ``resource`` is usually specified as a path. For example, a Project resource is specified as ``projects/{project}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.Policy` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if 'get_iam_policy' not in self._inner_api_calls: self._inner_api_calls[ 'get_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_iam_policy, default_retry=self._method_configs['GetIamPolicy'].retry, default_timeout=self._method_configs['GetIamPolicy']. timeout, client_info=self._client_info, ) request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) return self._inner_api_calls['get_iam_policy']( request, retry=retry, timeout=timeout, metadata=metadata) def test_iam_permissions(self, resource, permissions, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error. Example: >>> from google.cloud import pubsub_v1 >>> >>> client = pubsub_v1.PublisherClient() >>> >>> resource = client.topic_path('[PROJECT]', '[TOPIC]') >>> >>> # TODO: Initialize ``permissions``: >>> permissions = [] >>> >>> response = client.test_iam_permissions(resource, permissions) Args: resource (str): REQUIRED: The resource for which the policy detail is being requested. ``resource`` is usually specified as a path. For example, a Project resource is specified as ``projects/{project}``. permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see `IAM Overview <https://cloud.google.com/iam/docs/overview#permissions>`_. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.pubsub_v1.types.TestIamPermissionsResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if 'test_iam_permissions' not in self._inner_api_calls: self._inner_api_calls[ 'test_iam_permissions'] = google.api_core.gapic_v1.method.wrap_method( self.transport.test_iam_permissions, default_retry=self._method_configs['TestIamPermissions']. retry, default_timeout=self._method_configs['TestIamPermissions']. timeout, client_info=self._client_info, ) request = iam_policy_pb2.TestIamPermissionsRequest( resource=resource, permissions=permissions, ) return self._inner_api_calls['test_iam_permissions']( request, retry=retry, timeout=timeout, metadata=metadata)
apache-2.0
2,759,004,533,117,325,300
44.838636
175
0.574768
false
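A minimal end-to-end sketch assembled from the docstring examples in the publisher client above; '[PROJECT]' and '[TOPic]'.replace — rather, '[PROJECT]' and '[TOPIC]' are placeholders to substitute, and it assumes the google-cloud-pubsub package plus default application credentials are available.

from google.cloud import pubsub_v1

client = pubsub_v1.PublisherClient()
topic = client.topic_path('[PROJECT]', '[TOPIC]')

# Create the topic, then publish a single message to it.
client.create_topic(topic)
response = client.publish(topic, [{'data': b'hello'}])
print(response.message_ids)

# Clean up once the topic is no longer needed.
client.delete_topic(topic)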
robwarm/gpaw-symm
gpaw/test/lcao_pair_and_coulomb.py
1
2151
import numpy as np import cPickle as pickle from ase.structure import molecule from gpaw.lcao.tools import makeU, makeV from gpaw import GPAW, FermiDirac, restart from gpaw.lcao.pwf2 import LCAOwrap from gpaw.lcao.tools import remove_pbc, get_bfi2, get_bf_centers from gpaw.mpi import world, rank, MASTER, serial_comm from gpaw.test import equal scat = range(2) atoms = molecule('H2') atoms.set_cell([6.4, 6.4, 6.4]) atoms.center() calc = GPAW(mode='lcao', occupations=FermiDirac(0.1)) atoms.set_calculator(calc) atoms.get_potential_energy() calc.write('lcao_pair.gpw') if rank == MASTER: atoms, calc = restart('lcao_pair.gpw', txt=None, communicator=serial_comm) lcao = LCAOwrap(calc) fermi = calc.get_fermi_level() H = lcao.get_hamiltonian() S = lcao.get_overlap() pickle.dump((H, S), open('lcao_pair_hs.pckl', 'wb'), 2) symbols = atoms.get_chemical_symbols() #indices = get_bfi2(symbols, basis, scat) indices = range(2) lcao.get_xc(indices=indices).dump('lcao_pair_xc.pckl') lcao.get_Fcore(indices=indices).dump('lcao_pair_Fcore.pckl') w_wG = lcao.get_orbitals(indices=indices) P_awi = lcao.get_projections(indices=indices) pickle.dump((w_wG, P_awi), open('lcao_pair_w_wG__P_awi.pckl', 'wb'), 2) world.barrier() makeU('lcao_pair.gpw', 'lcao_pair_w_wG__P_awi.pckl', 'lcao_pair_eps_q__U_pq.pckl', 1.0e-5, dppname='lcao_pair_D_pp.pckl') world.barrier() makeV('lcao_pair.gpw', 'lcao_pair_w_wG__P_awi.pckl', 'lcao_pair_eps_q__U_pq.pckl', 'lcao_pair_V_qq.pckl', 'lcao_pair_V_qq.log', False) world.barrier() V_qq = np.load('lcao_pair_V_qq.pckl') eps_q, U_pq = np.load('lcao_pair_eps_q__U_pq.pckl') assert U_pq.flags.contiguous Usq_pq = U_pq * np.sqrt(eps_q) V_pp = np.dot(np.dot(Usq_pq, V_qq), Usq_pq.T.conj()) V_pp_ref = np.array( [[ 15.34450177, 11.12669608, 11.12669608, 12.82934563], [ 11.12669608, 8.82280293, 8.82280293, 11.12669608], [ 11.12669608, 8.82280293, 8.82280293, 11.12669608], [ 12.82934563, 11.12669608, 11.12669608, 15.34450178]]) equal(abs(V_pp_ref-V_pp).max(), 0.0, 1.0e-5)
gpl-3.0
945,840,178,704,066,600
33.142857
75
0.655974
false
rdeits/cryptics
pycryptics/grammar/cfg.py
1
3899
import nltk.grammar as gram import pycryptics.grammar.nodes as nd from pycryptics.utils.indicators import INDICATORS """ A Context Free Grammar (CFG) to describe allowed substructures of cryptic crossword clues and how to solve each substructure. """ # The basic wordplay transforms top = gram.Nonterminal(nd.TopNode) lit = gram.Nonterminal(nd.LitNode) d = gram.Nonterminal(nd.DNode) syn = gram.Nonterminal(nd.SynNode) first = gram.Nonterminal(nd.FirstNode) null = gram.Nonterminal(nd.NullNode) # Clue functions ana = gram.Nonterminal(nd.AnaNode) sub = gram.Nonterminal(nd.SubNode) sub_init = gram.Nonterminal(nd.SubInitNode) sub_final = gram.Nonterminal(nd.SubFinalNode) ins = gram.Nonterminal(nd.InsNode) rev = gram.Nonterminal(nd.RevNode) # ana_, rev_, etc. are anagram/reversal/etc indicators, # so they produce no text in the wordplay output ana_ = gram.Nonterminal(nd.AnaIndNode) sub_ = gram.Nonterminal(nd.SubIndNode) sub_init_ = gram.Nonterminal(nd.SubInitIndNode) sub_final_ = gram.Nonterminal(nd.SubFinalIndNode) ins_ = gram.Nonterminal(nd.InsIndNode) rev_ = gram.Nonterminal(nd.RevIndNode) ind_nodes = [nd.AnaIndNode, nd.SubIndNode, nd.SubFinalIndNode, nd.SubInitIndNode, nd.InsIndNode, nd.RevIndNode] # All the *_arg elements just exist to make the production rules more clear # so they just pass their inputs literally clue_arg = gram.Nonterminal(nd.ClueArgNode) ins_arg = gram.Nonterminal(nd.InsArgNode) ana_arg = gram.Nonterminal(nd.AnaArgNode) sub_arg = gram.Nonterminal(nd.SubArgNode) rev_arg = gram.Nonterminal(nd.RevArgNode) production_rules = { ins: [[ins_arg, ins_, ins_arg], [ins_arg, ins_arg, ins_]], ana: [[ana_arg, ana_], [ana_, ana_arg]], sub: [[sub_arg, sub_], [sub_, sub_arg]], sub_init: [[sub_arg, sub_init_], [sub_init_, sub_arg]], sub_final: [[sub_arg, sub_final_], [sub_final_, sub_arg]], rev: [[rev_arg, rev_], [rev_, rev_arg]], clue_arg: [[lit], [syn], [first], [null], [ana], [sub], [ins], [rev], [sub_init], [sub_final]], ins_arg: [[lit], [ana], [syn], [sub], [sub_init], [sub_final], [first], [rev]], ana_arg: [[lit]], sub_arg: [[lit], [syn], [rev]], rev_arg: [[lit], [syn]], top: [[clue_arg, d], [clue_arg, clue_arg, d], [clue_arg, clue_arg, clue_arg, d], [d, clue_arg], [d, clue_arg, clue_arg], [d, clue_arg, clue_arg, clue_arg], ] } additional_clue_rules = [[sub_init_] + [first] * i for i in range(3, 8)] + [[first] * i + [sub_init_] for i in range(3, 8)] for r in additional_clue_rules: production_rules[top].append(r + [d]) production_rules[top].append([d] + r) base_prods = [] for n, rules in production_rules.items(): for r in rules: base_prods.append(gram.Production(n, r)) known_functions = {'in': [ins_, lit, null, sub_], 'a': [lit, syn, null], 'is': [null, lit], 'for': [null, syn], 'large': [first, syn], 'primarily': [sub_init_], 'and': [null, lit], 'of': [null], 'on': [ins_, null, lit, syn], 'with': [null, ins_]} def generate_grammar(phrases): prods = [] for p in phrases: if p in known_functions: tags = known_functions[p] else: found = False tags = [lit, d, syn, first] for ind in ind_nodes: if any(w == p or (len(w) > 5 and abs(len(w) - len(p)) <= 3 and p.startswith(w[:-3])) for w in INDICATORS[ind.name]): tags.append(gram.Nonterminal(ind)) found = True if not found: tags = [lit, d, syn, first, ana_, sub_, sub_init_, sub_final_, rev_] for t in tags: prods.append(gram.Production(t, [p])) return gram.ContextFreeGrammar(top, base_prods + prods)
mit
3,835,686,781,258,495,000
37.22549
132
0.598872
false
DanielMSchmidt/kombinatorik
source/conf.py
1
9565
# -*- coding: utf-8 -*- # # Kombinatorik documentation build configuration file, created by # sphinx-quickstart on Wed Jul 24 17:58:33 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.mathjax', 'sphinx.ext.todo'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Kombinatorik' copyright = u'2013, Daniel Schmidt' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'haiku' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Kombinatorikdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Kombinatorik.tex', u'Kombinatorik', u'Daniel Schmidt', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', 'kombinatorik', u'Kombinatorik', [u'Daniel Schmidt'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Kombinatorik', u'Kombinatorik', u'Daniel Schmidt', 'Kombinatorik', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Kombinatorik' epub_author = u'Daniel Schmidt' epub_publisher = u'Daniel Schmidt' epub_copyright = u'2013, Daniel Schmidt' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Fix unsupported image types using the PIL. #epub_fix_images = False # Scale large images. #epub_max_image_width = 0 # If 'no', URL addresses will not be shown. #epub_show_urls = 'inline' # If false, no index is generated. #epub_use_index = True
mit
5,717,283,508,492,965,000
30.25817
80
0.704757
false
Yanivs24/AutoPA
experiments/python_scripts/avg_results_files.py
1
1420
#!/usr/bin/python # This file is part of AutoPA - automatic extraction of pre-aspiration # from speech segments in audio files. # # Copyright (c) 2016 Yaniv Sheena import os import re import sys import numpy as np if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError("expected 1 value - results path") results_path = sys.argv[1] current_path = os.getcwd() results_full_path = current_path + '/' + results_path file_data = '' numbers_res = [] for file in os.listdir(results_path): with open(os.path.join(results_full_path, file), 'r') as f: file_data = f.read() # get all results in the order they appeared numbers_res.append(map(float, re.findall('\d+\.\d+', file_data))) text_pattern = re.split('\d+\.\d+', file_data) # average results avg_results = np.zeros(len(numbers_res[0])) for res in numbers_res: avg_results = avg_results+np.array(res) avg_results /= len(numbers_res) print 'Read %s result files - the average is:' % len(numbers_res) print avg_results # write results to file with the same format res_path = os.path.join(results_full_path, 'averaged_results.txt') f = open(res_path, 'w') for num, part in zip(avg_results, text_pattern): f.write(part) f.write(str(num)) f.close() print 'Saved average results to: %s' % res_path
lgpl-3.0
410,955,419,383,036,100
24.818182
73
0.620423
false
xuweiliang/Codelibrary
nova/policies/licence.py
1
1502
# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-licence' POLICY_ROOT = 'os_compute_api:os-licence:%s' licence_policies = [ policy.RuleDefault( name=POLICY_ROOT % 'discoverable', check_str=base.RULE_ANY), policy.RuleDefault( name=BASE_POLICY_NAME, check_str=base.RULE_ADMIN_OR_OWNER), ] #licence_policies = [ # policy.RuleDefault( # name=POLICY_ROOT % 'discoverable', # check_str=base.RULE_ANY), # policy.RuleDefault( # name=BASE_POLICY_NAME, # check_str=base.RULE_ADMIN_API), # policy.RuleDefault( # name=POLICY_ROOT % 'show', # check_str=base.RULE_ADMIN_API), # policy.RuleDefault( # name=POLICY_ROOT % 'update', # check_str=base.RULE_ADMIN_API) #] def list_rules(): return licence_policies
apache-2.0
-5,257,845,597,965,646,000
29.04
78
0.675766
false
srinath29/pyView
helper.py
1
2537
from PyQt4 import QtGui, uic, QtCore, Qt import PyQt4 import pandas import sys import os viewBase, viewForm = uic.loadUiType(os.path.join(os.path.dirname(os.path.realpath(__file__)),"view.ui")) class Helper(viewBase, viewForm): def __init__(self, parent = None): super(viewBase,self).__init__(parent) self.setupUi(self) def __version__(self): print("0.0.1") def View(self, df): """ This is to view a data frame :param df: pandas.Dataframe :return: Qt object to view the data """ #df = pandas.DataFrame() #self.table = QtGui.QTableWidget() self.table.setColumnCount(len(df.columns)) self.table.setRowCount(len(df.index)) self.sortOrder = {} for p in range(len(df.columns)): self.sortOrder[p] = "" for i in range(len(df.index)): for j in range(len(df.columns)): self.table.setItem(i,j, QtGui.QTableWidgetItem(str(df.iloc[i][j]))) for i in range(len(df.columns)): self.table.setHorizontalHeaderItem(i,QtGui.QTableWidgetItem(str(df.columns[i]))) for i in range(len(df.index)): self.table.setVerticalHeaderItem(i,QtGui.QTableWidgetItem(str(df.index[i]))) # for i in range(len(df.columns)): # self.table.horizontalHeaderItem(i).setText(str(df.columns[i])) self.horizHeader = self.table.horizontalHeader() self.horizHeader.setSortIndicatorShown(True) QtCore.QObject.connect(self.horizHeader, QtCore.SIGNAL("sectionClicked(int)"), self.sortByColumn) self.show() def sortByColumn(self,p): #print(self.sortOrder[p]) if self.sortOrder[p]=="": self.horizHeader.setSortIndicator(p, Qt.Qt.DescendingOrder) self.table.sortByColumn(p, Qt.Qt.DescendingOrder) self.sortOrder[p]="D" elif self.sortOrder[p]=="A": self.horizHeader.setSortIndicator(p, Qt.Qt.DescendingOrder) self.table.sortByColumn(p, Qt.Qt.DescendingOrder) self.sortOrder[p]="D" elif self.sortOrder[p]=="D": self.horizHeader.setSortIndicator(p, Qt.Qt.AscendingOrder) self.table.sortByColumn(p, Qt.Qt.AscendingOrder) self.sortOrder[p]="A" def View(df): app = QtGui.QApplication(sys.argv) app.setStyle("plastique") h = Helper() h.View(df) h.show() app.exec_()
gpl-3.0
-1,201,301,448,810,805,500
29.7125
105
0.592826
false
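A short usage sketch for the View helper above; the module name is assumed from the record's path (helper.py), and it requires PyQt4 and the accompanying view.ui file to be present next to it.

import pandas

from helper import View  # module name assumed from the record's path; adjust if packaged differently

# Build a small frame and open it in the sortable Qt table view.
df = pandas.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
View(df)  # starts its own QApplication and blocks until the window is closed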
FFMG/myoddweb.piger
monitor/api/python/Python-3.7.2/Lib/test/test_pickletools.py
3
4333
import pickle import pickletools from test import support from test.pickletester import AbstractPickleTests import unittest class OptimizedPickleTests(AbstractPickleTests): def dumps(self, arg, proto=None): return pickletools.optimize(pickle.dumps(arg, proto)) def loads(self, buf, **kwds): return pickle.loads(buf, **kwds) # Test relies on precise output of dumps() test_pickle_to_2x = None # Test relies on writing by chunks into a file object. test_framed_write_sizes_with_delayed_writer = None def test_optimize_long_binget(self): data = [str(i) for i in range(257)] data.append(data[-1]) for proto in range(pickle.HIGHEST_PROTOCOL + 1): pickled = pickle.dumps(data, proto) unpickled = pickle.loads(pickled) self.assertEqual(unpickled, data) self.assertIs(unpickled[-1], unpickled[-2]) pickled2 = pickletools.optimize(pickled) unpickled2 = pickle.loads(pickled2) self.assertEqual(unpickled2, data) self.assertIs(unpickled2[-1], unpickled2[-2]) self.assertNotIn(pickle.LONG_BINGET, pickled2) self.assertNotIn(pickle.LONG_BINPUT, pickled2) def test_optimize_binput_and_memoize(self): pickled = (b'\x80\x04\x95\x15\x00\x00\x00\x00\x00\x00\x00' b']\x94(\x8c\x04spamq\x01\x8c\x03ham\x94h\x02e.') # 0: \x80 PROTO 4 # 2: \x95 FRAME 21 # 11: ] EMPTY_LIST # 12: \x94 MEMOIZE # 13: ( MARK # 14: \x8c SHORT_BINUNICODE 'spam' # 20: q BINPUT 1 # 22: \x8c SHORT_BINUNICODE 'ham' # 27: \x94 MEMOIZE # 28: h BINGET 2 # 30: e APPENDS (MARK at 13) # 31: . STOP self.assertIn(pickle.BINPUT, pickled) unpickled = pickle.loads(pickled) self.assertEqual(unpickled, ['spam', 'ham', 'ham']) self.assertIs(unpickled[1], unpickled[2]) pickled2 = pickletools.optimize(pickled) unpickled2 = pickle.loads(pickled2) self.assertEqual(unpickled2, ['spam', 'ham', 'ham']) self.assertIs(unpickled2[1], unpickled2[2]) self.assertNotIn(pickle.BINPUT, pickled2) class MiscTestCase(unittest.TestCase): def test__all__(self): blacklist = {'bytes_types', 'UP_TO_NEWLINE', 'TAKEN_FROM_ARGUMENT1', 'TAKEN_FROM_ARGUMENT4', 'TAKEN_FROM_ARGUMENT4U', 'TAKEN_FROM_ARGUMENT8U', 'ArgumentDescriptor', 'read_uint1', 'read_uint2', 'read_int4', 'read_uint4', 'read_uint8', 'read_stringnl', 'read_stringnl_noescape', 'read_stringnl_noescape_pair', 'read_string1', 'read_string4', 'read_bytes1', 'read_bytes4', 'read_bytes8', 'read_unicodestringnl', 'read_unicodestring1', 'read_unicodestring4', 'read_unicodestring8', 'read_decimalnl_short', 'read_decimalnl_long', 'read_floatnl', 'read_float8', 'read_long1', 'read_long4', 'uint1', 'uint2', 'int4', 'uint4', 'uint8', 'stringnl', 'stringnl_noescape', 'stringnl_noescape_pair', 'string1', 'string4', 'bytes1', 'bytes4', 'bytes8', 'unicodestringnl', 'unicodestring1', 'unicodestring4', 'unicodestring8', 'decimalnl_short', 'decimalnl_long', 'floatnl', 'float8', 'long1', 'long4', 'StackObject', 'pyint', 'pylong', 'pyinteger_or_bool', 'pybool', 'pyfloat', 'pybytes_or_str', 'pystring', 'pybytes', 'pyunicode', 'pynone', 'pytuple', 'pylist', 'pydict', 'pyset', 'pyfrozenset', 'anyobject', 'markobject', 'stackslice', 'OpcodeInfo', 'opcodes', 'code2op', } support.check__all__(self, pickletools, blacklist=blacklist) def test_main(): support.run_unittest(OptimizedPickleTests) support.run_unittest(MiscTestCase) support.run_doctest(pickletools) if __name__ == "__main__": test_main()
gpl-2.0
7,697,367,086,105,588,000
41.480392
81
0.55412
false
PietroPasotti/AutomatedTagger
analyzers.py
1
1493
#analyzers.py

from main import _export, test, x2, RESULTS, evaluation


def overlap(dic1, dic2):
    # set() over a dict keeps only its keys; over a list it keeps every element,
    # so this works for both of the result containers used below.
    summ = len(set(dic1).union(set(dic2)))

    overlapOfPlaindics = len(set(dic1)) + len(set(dic2)) - summ

    return overlapOfPlaindics


def unpackKWS_CO(COres):
    # Flatten the co-occurrence pairs into a plain list of keywords.
    kws = []
    for CO in COres:
        CO1, CO2 = CO
        kws.extend([CO1, CO2])

    return kws


def matcher(item1, item2):
    COres1, FRres1 = RESULTS[item1]
    COres2, FRres2 = RESULTS[item2]

    overlapCORES = overlap(COres1, COres2)  # (currently not folded into the final score)

    kws1 = unpackKWS_CO(COres1)
    kws2 = unpackKWS_CO(COres2)

    overlapKWS_fromCO = overlap(kws1, kws2)      # overlapping co-occurrence-extracted words
    overlapKWS_fromFR = overlap(FRres1, FRres2)  # overlapping words extracted by frequency

    weight = 1.65  # weighting factor: co-occurrence overlap counts for more than frequency overlap
    weightedoverlapKWS = weight * overlapKWS_fromCO + overlapKWS_fromFR

    return weightedoverlapKWS


def analyze(RESULTS, verbose):
    # First pass: extract the top co-occurrences and top frequent words per item.
    for item in _export['items']:
        test(item, verbose)

        top_co_occurr = x2.HighestNumbers(5, x2.co_occurrences, getvalues=True)
        top_freq = x2.HighestNumbers(5, x2.words_count, getvalues=True)

        RESULTS[item] = (top_co_occurr, top_freq)

    # Initialise an entry for every unordered pair of distinct items.
    for elem in _export['items']:
        for otherelem in _export['items']:
            if elem != otherelem and frozenset({elem, otherelem}) not in evaluation:
                evaluation[frozenset({elem, otherelem})] = 'n/a.'

    # Second pass: score each pair by its weighted keyword overlap.
    for pair in evaluation:
        A, B = pair
        evaluation[pair] = matcher(A, B)

    return evaluation
agpl-3.0
4,131,902,085,462,726,700
23.883333
87
0.698593
false
KeyWeeUsr/plyer
plyer/compat.py
1
1120
''' Compatibility module for Python 2.7 and > 3.3 ============================================= ''' # pylint: disable=invalid-name __all__ = ('PY2', 'string_types', 'queue', 'iterkeys', 'itervalues', 'iteritems', 'xrange') import sys try: import queue except ImportError: import Queue as queue #: True if Python 2 intepreter is used PY2 = sys.version_info[0] == 2 #: String types that can be used for checking if a object is a string string_types = None text_type = None if PY2: # pylint: disable=undefined-variable # built-in actually, so it is defined in globals() for py2 string_types = basestring text_type = unicode else: string_types = text_type = str if PY2: iterkeys = lambda d: d.iterkeys() itervalues = lambda d: d.itervalues() iteritems = lambda d: d.iteritems() else: iterkeys = lambda d: iter(d.keys()) itervalues = lambda d: iter(d.values()) iteritems = lambda d: iter(d.items()) if PY2: # pylint: disable=undefined-variable # built-in actually, so it is defined in globals() for py2 xrange = xrange else: xrange = range
mit
-9,110,793,413,409,874,000
24.454545
69
0.633929
false
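A brief sketch of how the compatibility shims above are meant to be used so the same loop runs under Python 2 and 3; the dictionary is only illustrative.

from plyer.compat import PY2, iteritems, xrange

d = {'a': 1, 'b': 2}
for key, value in iteritems(d):  # dict iteration without caring about .iteritems() vs .items()
    print('%s=%s' % (key, value))

for i in xrange(3):  # xrange on Python 2, range on Python 3
    print(i)

print('running under Python 2' if PY2 else 'running under Python 3')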
SimonSapin/tinycss
tinycss/tests/test_api.py
2
1340
# coding: utf-8 """ Tests for the public API ------------------------ :copyright: (c) 2012 by Simon Sapin. :license: BSD, see LICENSE for more details. """ from __future__ import unicode_literals from pytest import raises from tinycss import make_parser from tinycss.page3 import CSSPage3Parser def test_make_parser(): class MyParser(object): def __init__(self, some_config): self.some_config = some_config parsers = [ make_parser(), make_parser('page3'), make_parser(CSSPage3Parser), make_parser(MyParser, some_config=42), make_parser(CSSPage3Parser, MyParser, some_config=42), make_parser(MyParser, 'page3', some_config=42), ] for parser, exp in zip(parsers, [False, True, True, False, True, True]): assert isinstance(parser, CSSPage3Parser) == exp for parser, exp in zip(parsers, [False, False, False, True, True, True]): assert isinstance(parser, MyParser) == exp for parser in parsers[3:]: assert parser.some_config == 42 # Extra or missing named parameters raises(TypeError, make_parser, some_config=4) raises(TypeError, make_parser, 'page3', some_config=4) raises(TypeError, make_parser, MyParser) raises(TypeError, make_parser, MyParser, some_config=4, other_config=7)
bsd-3-clause
-7,873,681,417,298,320,000
28.777778
77
0.64403
false
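A condensed illustration of the make_parser composition exercised by the test above; it only restates behaviour the test already asserts.

from tinycss import make_parser
from tinycss.page3 import CSSPage3Parser

# Passing 'page3' (or the class itself) mixes the @page level-3 extensions
# into the returned parser; extra keyword arguments are forwarded to any
# custom parser classes mixed in the same way.
parser = make_parser('page3')
assert isinstance(parser, CSSPage3Parser)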
noironetworks/heat
heat/tests/openstack/keystone/test_service.py
1
10694
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks from heat.engine import properties from heat.engine import resource from heat.engine.resources.openstack.keystone import service from heat.engine import stack from heat.engine import template from heat.tests import common from heat.tests import utils keystone_service_template = { 'heat_template_version': '2015-04-30', 'resources': { 'test_service': { 'type': 'OS::Keystone::Service', 'properties': { 'name': 'test_service_1', 'description': 'Test service', 'type': 'orchestration', 'enabled': False } } } } class KeystoneServiceTest(common.HeatTestCase): def setUp(self): super(KeystoneServiceTest, self).setUp() self.ctx = utils.dummy_context() # Mock client self.keystoneclient = mock.Mock() self.patchobject(resource.Resource, 'client', return_value=fake_ks.FakeKeystoneClient( client=self.keystoneclient)) self.services = self.keystoneclient.services # Mock client plugin self.keystone_client_plugin = mock.MagicMock() def _setup_service_resource(self, stack_name, use_default=False): tmpl_data = copy.deepcopy(keystone_service_template) if use_default: props = tmpl_data['resources']['test_service']['properties'] del props['name'] del props['enabled'] del props['description'] test_stack = stack.Stack( self.ctx, stack_name, template.Template(tmpl_data) ) r_service = test_stack['test_service'] r_service.client = mock.MagicMock() r_service.client.return_value = self.keystoneclient r_service.client_plugin = mock.MagicMock() r_service.client_plugin.return_value = self.keystone_client_plugin return r_service def _get_mock_service(self): value = mock.MagicMock() value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152' return value def test_service_handle_create(self): rsrc = self._setup_service_resource('test_service_create') mock_service = self._get_mock_service() self.services.create.return_value = mock_service # validate the properties self.assertEqual( 'test_service_1', rsrc.properties.get(service.KeystoneService.NAME)) self.assertEqual( 'Test service', rsrc.properties.get( service.KeystoneService.DESCRIPTION)) self.assertEqual( 'orchestration', rsrc.properties.get(service.KeystoneService.TYPE)) self.assertFalse(rsrc.properties.get( service.KeystoneService.ENABLED)) rsrc.handle_create() # validate service creation self.services.create.assert_called_once_with( name='test_service_1', description='Test service', type='orchestration', enabled=False) # validate physical resource id self.assertEqual(mock_service.id, rsrc.resource_id) def test_service_handle_create_default(self): rsrc = self._setup_service_resource('test_create_with_defaults', use_default=True) mock_service = self._get_mock_service() self.services.create.return_value = mock_service rsrc.physical_resource_name = mock.MagicMock() rsrc.physical_resource_name.return_value = 'foo' # validate the properties self.assertIsNone( rsrc.properties.get(service.KeystoneService.NAME)) self.assertIsNone(rsrc.properties.get( 
service.KeystoneService.DESCRIPTION)) self.assertEqual( 'orchestration', rsrc.properties.get(service.KeystoneService.TYPE)) self.assertTrue(rsrc.properties.get(service.KeystoneService.ENABLED)) rsrc.handle_create() # validate service creation with physical resource name self.services.create.assert_called_once_with( name='foo', description=None, type='orchestration', enabled=True) def test_service_handle_update(self): rsrc = self._setup_service_resource('test_update') rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' prop_diff = {service.KeystoneService.NAME: 'test_service_1_updated', service.KeystoneService.DESCRIPTION: 'Test Service updated', service.KeystoneService.TYPE: 'heat_updated', service.KeystoneService.ENABLED: False} rsrc.handle_update(json_snippet=None, tmpl_diff=None, prop_diff=prop_diff) self.services.update.assert_called_once_with( service=rsrc.resource_id, name=prop_diff[service.KeystoneService.NAME], description=prop_diff[service.KeystoneService.DESCRIPTION], type=prop_diff[service.KeystoneService.TYPE], enabled=prop_diff[service.KeystoneService.ENABLED] ) def test_service_handle_update_default_name(self): rsrc = self._setup_service_resource('test_update_default_name') rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' rsrc.physical_resource_name = mock.MagicMock() rsrc.physical_resource_name.return_value = 'foo' # Name is reset to None, so default to physical resource name prop_diff = {service.KeystoneService.NAME: None} rsrc.handle_update(json_snippet=None, tmpl_diff=None, prop_diff=prop_diff) # validate default name to physical resource name self.services.update.assert_called_once_with( service=rsrc.resource_id, name='foo', type=None, description=None, enabled=None ) def test_service_handle_update_only_enabled(self): rsrc = self._setup_service_resource('test_update_enabled_only') rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' prop_diff = {service.KeystoneService.ENABLED: False} rsrc.handle_update(json_snippet=None, tmpl_diff=None, prop_diff=prop_diff) self.services.update.assert_called_once_with( service=rsrc.resource_id, name=None, description=None, type=None, enabled=prop_diff[service.KeystoneService.ENABLED] ) def test_properties_title(self): property_title_map = { service.KeystoneService.NAME: 'name', service.KeystoneService.DESCRIPTION: 'description', service.KeystoneService.TYPE: 'type', service.KeystoneService.ENABLED: 'enabled' } for actual_title, expected_title in property_title_map.items(): self.assertEqual( expected_title, actual_title, 'KeystoneService PROPERTIES(%s) title modified.' 
% actual_title) def test_property_name_validate_schema(self): schema = service.KeystoneService.properties_schema[ service.KeystoneService.NAME] self.assertTrue( schema.update_allowed, 'update_allowed for property %s is modified' % service.KeystoneService.NAME) self.assertEqual(properties.Schema.STRING, schema.type, 'type for property %s is modified' % service.KeystoneService.NAME) self.assertEqual('Name of keystone service.', schema.description, 'description for property %s is modified' % service.KeystoneService.NAME) def test_property_description_validate_schema(self): schema = service.KeystoneService.properties_schema[ service.KeystoneService.DESCRIPTION] self.assertTrue( schema.update_allowed, 'update_allowed for property %s is modified' % service.KeystoneService.DESCRIPTION) self.assertEqual(properties.Schema.STRING, schema.type, 'type for property %s is modified' % service.KeystoneService.DESCRIPTION) self.assertEqual('Description of keystone service.', schema.description, 'description for property %s is modified' % service.KeystoneService.DESCRIPTION) def test_property_type_validate_schema(self): schema = service.KeystoneService.properties_schema[ service.KeystoneService.TYPE] self.assertTrue( schema.update_allowed, 'update_allowed for property %s is modified' % service.KeystoneService.TYPE) self.assertTrue( schema.required, 'required for property %s is modified' % service.KeystoneService.TYPE) self.assertEqual(properties.Schema.STRING, schema.type, 'type for property %s is modified' % service.KeystoneService.TYPE) self.assertEqual('Type of keystone Service.', schema.description, 'description for property %s is modified' % service.KeystoneService.TYPE) def test_show_resource(self): rsrc = self._setup_service_resource('test_show_resource') moc_service = mock.Mock() moc_service.to_dict.return_value = {'attr': 'val'} self.services.get.return_value = moc_service attributes = rsrc._show_resource() self.assertEqual({'attr': 'val'}, attributes)
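

# --- Hedged illustration (comment added for clarity, not part of the original
# test module): the ``keystone_service_template`` dict defined near the top of
# this file corresponds to the following HOT snippet.
#
#   heat_template_version: 2015-04-30
#   resources:
#     test_service:
#       type: OS::Keystone::Service
#       properties:
#         name: test_service_1
#         description: Test service
#         type: orchestration
#         enabled: false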
apache-2.0
-5,912,710,049,060,024,000
36.391608
78
0.601085
false
iulian787/spack
var/spack/repos/builtin/packages/ppopen-appl-fdm-at/package.py
2
1685
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PpopenApplFdmAt(MakefilePackage): """ppOpen-APPL/FDM with Auto-Tuning""" homepage = "http://ppopenhpc.cc.u-tokyo.ac.jp/ppopenhpc/" git = "https://github.com/Post-Peta-Crest/ppOpenHPC.git" version('master', branch='ATA/FDM') depends_on('mpi') # depends_on('ppopen-appl-fdm', type='build') build_directory = "3.hybrid_AT" parallel = False def edit(self, spec, prefix): with working_dir(self.build_directory): fflags = ['-O3', self.compiler.openmp_flag] if spec.satisfies('%gcc'): fflags.append('-ffree-line-length-none') if spec.satisfies('arch=x86_64:'): fflags.append('-mcmodel=medium') makefile_opt = FileFilter('Makefile.option') makefile_opt.filter( 'FC = .*$', 'FC = {0}'.format(spec['mpi'].mpifc) ) makefile_opt.filter( 'FFLAGS = .*$', 'FFLAGS = -O3 {0}'.format(' '.join(fflags)) ) def install(self, spec, prefix): mkdir(prefix.bin) copy(join_path(self.build_directory, 'seism3d3n'), prefix.bin) install_src_dir = join_path(prefix.src, self.build_directory) mkdirp(install_src_dir) install_tree(self.build_directory, install_src_dir) with working_dir(install_src_dir): make('clean') mkdir(prefix.doc) copy('readme.txt', prefix.doc)
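

# --- Hedged illustration (comment only, not part of the original package):
# with a GCC toolchain and an MPI Fortran wrapper such as mpif90 on x86_64,
# the two filters in edit() above would leave Makefile.option looking roughly
# like the lines below. The compiler name is an assumption for illustration;
# the exact OpenMP flag comes from self.compiler.openmp_flag.
#
#   FC = mpif90
#   FFLAGS = -O3 -O3 -fopenmp -ffree-line-length-none -mcmodel=medium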
lgpl-2.1
2,089,136,601,130,362,000
33.387755
73
0.587537
false
benhunter/py-stuff
misc/csgo-stats.py
1
12345
# https://old.reddit.com/r/GlobalOffensive/comments/8mjqgc/i_made_a_python_script_that_generates_stats_using/ # https://pastebin.com/LLpym05c import datetime import matplotlib.pyplot as plt def min_to_sec(line): # converts minutes in string format 'XXX:XX' to seconds seconds = 0 seconds += (int(line[-1])) seconds += (int(line[-2])) * 10 seconds += (int(line[-4])) * 60 if line[-5].isdigit(): seconds += (int(line[-5])) * 600 if line[-6].isdigit(): seconds += (int(line[-6])) * 6000 return seconds def create_plot(entries, plottitle, xaxlabel, filelabel, res, kdinput): # dont feel like commenting this tbh if kdinput: plt.hist(entries, bins=(int(max(entries) * res))) else: plt.hist(entries, bins=range(min(entries), max(entries) + 1, 1)) plt.title(plottitle) if kdinput: plt.xticks(range(0, int(max(entries)))) plt.xlabel(xaxlabel) plt.ylabel('Occurrences') ax = plt.gca() ax.set_axisbelow(True) ax.grid(color='b', linestyle=':', alpha=0.3, linewidth=1) xleft, xright = ax.get_xlim() ybottom, ytop = ax.get_ylim() ax.set_aspect(abs((xright - xleft) / (ybottom - ytop)) * 0.4) plt.savefig(filelabel, dpi=300) plt.clf() filename = input("Input File Name (e.g. stats.txt or stats.htm): ") steamid = input("Your Steam ID: ") # splits file into list of individual HTML element strings file = open(filename, encoding="utf8").read().split('<') stats = [] # contains lists of individual games # Format: ['MAP', [D, M, Y], Q LENGTH, GAME LENGTH, GAME SCORE,[PING, K, A, D, MVP, HSP, Score]] current_game = [0] * 6 # temporarily holds current game data begin = False # for parsing through beginning of document for i, line in enumerate(file): line = line.strip() if 'td>\n' in line: # game info lines begin with <td>\n for some reason if 'Competitive' in line[10:]: begin = True # begin storing document data here current_game[0] = line[22:] if line[10:12] == '20': year = line[10:14] month = line[15:17] day = line[18:20] current_game[1] = list(map(int, [day, month, year])) if 'Wait Time:' in line[10:]: current_game[2] = min_to_sec(line) if 'Match Duration:' in line[10:]: current_game[3] = min_to_sec(line) # stores personal game data as list if begin and line[0:7] == 'a class' and steamid in line: ping = file[i + 4][3:] k = file[i + 6][3:] a = file[i + 8][3:] d = file[i + 10][3:] # had to do this because single MVPs don't contain the number '1' by the star mvp = -1 # if MVP entry is empty if file[i + 12][-2] == '>': mvp = 1 else: for j, char in enumerate(file[i + 12]): if char.isdigit(): mvp = file[i + 12][j:] break # had to do this because some HSP entries are empty hsp = -1 # if HSP entry is empty if file[i + 14][-2].isdigit(): hsp = file[i + 14][3:len(file[i + 14]) - 1] score = file[i + 16][3:] # appends performance data (list of ints) to stats list as fifth 6th element current_game[5] = list(map(int, [ping, k, a, d, mvp, hsp, score])) # gets the match score and sorts it in a list of 2 ints (your score first) if 'csgo_scoreboard_score' in line: match_score = line[45:].split(' : ') if not isinstance(current_game[5], list): match_score.reverse() current_game[4] = list(map(int, match_score)) if isinstance(current_game[4], list) and isinstance(current_game[5], list): # individual game lists contain 6 entries stats.append(current_game) current_game = [0] * 6 # clears list before recording next game's info current_game[3] = 1800 # 30 minute placeholder # declaration of stat variables total_kills = 0 total_deaths = 0 total_assists = 0 total_MVPs = 0 total_rounds_w = 0 total_rounds_l = 0 max_match_length = 0 min_match_length = 5400 
win_streak = 0 loss_streak = 0 tie_streak = 0 max_win_streak = 0 max_loss_streak = 0 max_tie_streak = 0 total_score = 0 hsp = [] # list containing all hsps mvp = [] # list containing all mvps map_plays = {} # dict containing maps (keys) and plays (vals) # initializing output file output = open('output.txt', 'w') stats.reverse() # looping through every 'stats' entry (game lists) for i, stat in enumerate(stats): # writing a list of every match to the output file output.write('\n' + str(i) + ': ' + repr(stat)) # summing K, D, A, MVP total_kills += stat[5][1] total_deaths += stat[5][3] total_assists += stat[5][2] total_MVPs += stat[5][4] total_rounds_w += stat[4][0] total_rounds_l += stat[4][1] total_score += stat[5][6] # creating list of Headshot Percentages (-1 excluded because -1 means no entry was listed) if stat[5][5] >= 0: hsp.append(stat[5][5]) # creating list of MVPs (-1 excluded because -1 means no entry was listed) if stat[5][4] >= 0: mvp.append(stat[5][4]) # finding the longest match if stat[3] > max_match_length: max_match_length = stat[3] max_match_index = i if stat[3] < min_match_length: min_match_length = stat[3] min_match_index = i # builds dictionary containing maps and number of times map has been played if stat[0] not in map_plays: map_plays[stat[0]] = 1 else: map_plays[stat[0]] += 1 ########################################################################### # convoluted way of calculating win/tie/loss streaks: if stat[4][0] > stat[4][1]: win_streak += 1 loss_streak, tie_streak = 0, 0 elif stat[4][0] == stat[4][1]: tie_streak += 1 win_streak, loss_streak = 0, 0 else: loss_streak += 1 win_streak, tie_streak = 0, 0 if win_streak > max_win_streak: max_win_streak = win_streak max_win_index = i if tie_streak > max_tie_streak: max_tie_streak = tie_streak max_tie_index = i if loss_streak > max_loss_streak: max_loss_streak = loss_streak max_loss_index = i ################################################################################ # writing output to output.txt file output.write('\nFormat: [\'MAP\', [D, M, Y], QUEUE LENGTH, GAME LENGTH, GAME SCORE, [PING, K, A, D, MVP, HSP, Score]]') output.write('\n\nSTATS----------------------------------------------------------------\n') output.write('{:<20} {:>7}'.format('\nTotal Kills:', total_kills)) output.write('{:<20} {:>7}'.format('\nTotal Deaths:', total_deaths)) output.write('{:<20} {:>7}'.format('\nTotal Assists:', total_assists)) output.write('{:<20} {:>7}'.format('\nTotal MVPs:', total_MVPs)) kdr = round(total_kills / total_deaths, 3) output.write('{:<20} {:>7}'.format('\nK/D:', kdr)) output.write('\n') output.write('{:<20} {:>7}'.format('\nTotal Rounds Won:', total_rounds_w)) output.write('{:<20} {:>7}'.format('\nTotal Rounds Lost:', total_rounds_l)) output.write('\n\nAverages (per game):') output.write('\n\t{:<15} {:>8}'.format('K:', round(total_kills / len(stats), 2))) output.write('\n\t{:<15} {:>8}'.format('D:', round(total_deaths / len(stats), 2))) output.write('\n\t{:<15} {:>8}'.format('A:', round(total_assists / len(stats), 2))) output.write('\n\t{:<15} {:>8}'.format('MVP:', round(total_MVPs / len(stats), 2))) output.write('\n\t{:<15} {:>8}'.format('Score:', round(total_score / len(stats), 2))) avg_rounds_won = round(total_rounds_w / len(stats), 1) avg_rounds_lost = round(total_rounds_l / len(stats), 1) output.write('\n\t{:<10} {} : {}'.format('Match (W:L):', avg_rounds_won, avg_rounds_lost)) total_rounds = total_rounds_l + total_rounds_w output.write('\n\nAverages (per round):') output.write('\n\t{:<15} {:>8}'.format('K:', 
round(total_kills / total_rounds, 2))) output.write('\n\t{:<15} {:>8}'.format('D:', round(total_deaths / total_rounds, 2))) output.write('\n\t{:<15} {:>8}'.format('A:', round(total_assists / total_rounds, 2))) output.write('\n\t{:<15} {:>8}'.format('MVP:', round(total_MVPs / total_rounds, 2))) output.write('\n\nHSP:') output.write('\n\t{:<10} {:>8}%'.format('Max:', round(max(hsp), 2))) output.write('\n\t{:<10} {:>8}%'.format('Min:', round(min(hsp), 2))) output.write('\n\t{:<10} {:>8}%'.format('Avg:', round(sum(hsp) / len(hsp), 1))) output.write( '\n\nLongest Match:\t\t{}\t\t(game #{})'.format(datetime.timedelta(seconds=max_match_length), max_match_index)) output.write( '\nShortest Match:\t\t{}\t\t(game #{})'.format(datetime.timedelta(seconds=min_match_length), min_match_index)) output.write( '\nMax Win Streak: \t{}\t\t(from game #{} to #{})'.format(max_win_streak, max_win_index - max_win_streak + 1, max_win_index)) output.write( '\nMax Tie Streak: \t{}\t\t(from game #{} to #{})'.format(max_tie_streak, max_tie_index - max_tie_streak + 1, max_tie_index)) output.write( '\nMax Loss Streak: \t{}\t\t(from game #{} to #{})'.format(max_loss_streak, max_loss_index - max_loss_streak + 1, max_loss_index)) output.write('\n\nMap Plays:') for entry in sorted(map_plays, key=map_plays.get, reverse=True): output.write('\n\t{:<12} {:>12}'.format(entry, map_plays[entry])) print('\'output.txt\' can be found in the same directory as this script') output.close() ##################################################################### # graphing and graphing calculations done below # lists containing raw vals for each stat kd = [] kills = [] deaths = [] assists = [] mvps = [] hsps = [] rw = [] # rounds won rl = [] games_played = {} for stat in stats: # collects vals from each game kills.append(stat[5][1]) deaths.append(stat[5][3]) assists.append(stat[5][2]) if stat[5][4] == -1: mvps.append(0) else: mvps.append(stat[5][4]) if stat[5][5] == -1: hsps.append(0) else: hsps.append(stat[5][5]) if stat[5][3] > 0: kd.append(stat[5][1] / stat[5][3]) else: kd.append(1) if stat[4][0] < 15: rw.append(stat[4][0]) if stat[4][1] < 15: rl.append(stat[4][1]) if stat[1][2] * 12 + stat[1][1] not in games_played: games_played[stat[1][2] * 12 + stat[1][1]] = 1 else: games_played[stat[1][2] * 12 + stat[1][1]] += 1 plt.rc('font', size=8) create_plot(kd, 'K/D Distribution', 'K/D (resolution: 0.05)', 'KD_Distribution.png', 20, True) kd_trimmed = [x for x in kd if x <= 3] create_plot(kd_trimmed, 'K/D Distribution (truncated at x = 3)', 'K/D (resolution: 0.01)', 'KD_Distribution (TRIMMED).png', 100, True) create_plot(kills, 'Kill Distribution', 'Kills', 'Kill_Distribution.png', 0, False) create_plot(deaths, 'Death Distribution', 'Deaths', 'Death_Distribution.png', 0, False) create_plot(assists, 'Assist Distribution', 'Assists', 'Assist_Distribution.png', 0, False) create_plot(mvps, 'MVP Distribution', 'MVPs', 'MVP_Distribution.png', 0, False) create_plot(hsps, 'HSP Distribution', 'HSP', 'HSP_Distribution.png', 0, False) create_plot(rw, 'Rounds Won Distribution (exc. 15, 16)', 'Rounds', 'RW_Distribution.png', 0, False) create_plot(rl, 'Rounds Lost Distribution (exc. 
15, 16)', 'Rounds', 'RL_Distribution.png', 0, False) # graphing games played games_played_x = [] games_played_y = [] for entry in sorted(games_played): games_played_x.append(entry - 1) games_played_y.append(games_played[entry]) games_played_x_string = [] for entry in games_played_x: year = int(entry / 12) month = (entry % 12) + 1 monthyear = str(month) + '-' + str(year) games_played_x_string.append(monthyear) plt.bar(games_played_x, games_played_y) plt.title('Games Played Per Month') plt.xlabel('Month') plt.ylabel('Occurrences') plt.xticks(games_played_x[::4], games_played_x_string[::4], rotation='45') plt.savefig('Games_Played.png', dpi=300) plt.clf() print('output images can be found in the same directory as this script')
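

# --- Hedged usage note (comment only, not part of the original script):
# min_to_sec() reads the trailing "MMM:SS" digits of a line, so it also works
# directly on the stripped table strings, e.g.:
#   min_to_sec("Wait Time: 03:45")       -> 225    (3 min 45 s)
#   min_to_sec("Match Duration: 32:15")  -> 1935   (32 min 15 s)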
mit
500,048,332,103,682,240
35.202346
119
0.580154
false
fusionbox/mezzanine
mezzanine/core/templatetags/mezzanine_tags.py
1
23919
from __future__ import absolute_import, division, unicode_literals from future.builtins import int, open, str from hashlib import md5 import os try: from urllib.parse import quote, unquote except ImportError: from urllib import quote, unquote from django.contrib import admin from django.contrib.auth import REDIRECT_FIELD_NAME from django.contrib.sites.models import Site from django.core.files import File from django.core.files.storage import default_storage from django.core.urlresolvers import reverse, resolve, NoReverseMatch from django.db.models import Model, get_model from django.template import (Context, Node, TextNode, Template, TemplateSyntaxError, TOKEN_TEXT, TOKEN_VAR, TOKEN_COMMENT, TOKEN_BLOCK) from django.template.defaultfilters import escape from django.template.loader import get_template from django.utils import translation from django.utils.html import strip_tags from django.utils.text import capfirst from mezzanine.conf import settings from mezzanine.core.fields import RichTextField from mezzanine.core.forms import get_edit_form from mezzanine.utils.cache import nevercache_token, cache_installed from mezzanine.utils.html import decode_entities from mezzanine.utils.importing import import_dotted_path from mezzanine.utils.sites import current_site_id, has_site_permission from mezzanine.utils.urls import admin_url from mezzanine.utils.views import is_editable from mezzanine import template register = template.Library() if "compressor" in settings.INSTALLED_APPS: @register.tag def compress(parser, token): """ Shadows django-compressor's compress tag so it can be loaded from ``mezzanine_tags``, allowing us to provide a dummy version when django-compressor isn't installed. """ from compressor.templatetags.compress import compress return compress(parser, token) else: @register.to_end_tag def compress(parsed, context, token): """ Dummy tag for fallback when django-compressor isn't installed. """ return parsed if cache_installed(): @register.tag def nevercache(parser, token): """ Tag for two phased rendering. Converts enclosed template code and content into text, which gets rendered separately in ``mezzanine.core.middleware.UpdateCacheMiddleware``. This is to bypass caching for the enclosed code and content. """ text = [] end_tag = "endnevercache" tag_mapping = { TOKEN_TEXT: ("", ""), TOKEN_VAR: ("{{", "}}"), TOKEN_BLOCK: ("{%", "%}"), TOKEN_COMMENT: ("{#", "#}"), } delimiter = nevercache_token() while parser.tokens: token = parser.next_token() if token.token_type == TOKEN_BLOCK and token.contents == end_tag: return TextNode(delimiter + "".join(text) + delimiter) start, end = tag_mapping[token.token_type] text.append("%s%s%s" % (start, token.contents, end)) parser.unclosed_block_tag(end_tag) else: @register.to_end_tag def nevercache(parsed, context, token): """ Dummy fallback ``nevercache`` for when caching is not configured. """ return parsed @register.inclusion_tag("includes/form_fields.html", takes_context=True) def fields_for(context, form): """ Renders fields for a form. """ context["form_for_fields"] = form return context @register.inclusion_tag("includes/form_errors.html", takes_context=True) def errors_for(context, form): """ Renders an alert if the form has any errors. """ context["form"] = form return context @register.filter def sort_by(items, attr): """ General sort filter - sorts by either attribute or key. 
""" def key_func(item): try: return getattr(item, attr) except AttributeError: try: return item[attr] except TypeError: getattr(item, attr) # Reraise AttributeError return sorted(items, key=key_func) @register.filter def is_installed(app_name): """ Returns ``True`` if the given app name is in the ``INSTALLED_APPS`` setting. """ from warnings import warn warn("The is_installed filter is deprecated. Please use the tag " "{% ifinstalled appname %}{% endifinstalled %}") return app_name in settings.INSTALLED_APPS @register.tag def ifinstalled(parser, token): """ Old-style ``if`` tag that renders contents if the given app is installed. The main use case is: {% ifinstalled app_name %} {% include "app_name/template.html" %} {% endifinstalled %} so we need to manually pull out all tokens if the app isn't installed, since if we used a normal ``if`` tag with a False arg, the include tag will still try and find the template to include. """ try: tag, app = token.split_contents() except ValueError: raise TemplateSyntaxError("ifinstalled should be in the form: " "{% ifinstalled app_name %}" "{% endifinstalled %}") end_tag = "end" + tag if app.strip("\"'") not in settings.INSTALLED_APPS: while True: token = parser.tokens.pop(0) if token.token_type == TOKEN_BLOCK and token.contents == end_tag: parser.tokens.insert(0, token) break nodelist = parser.parse((end_tag,)) parser.delete_first_token() class IfInstalledNode(Node): def render(self, context): return nodelist.render(context) return IfInstalledNode() @register.render_tag def set_short_url_for(context, token): """ Sets the ``short_url`` attribute of the given model for share links in the template. """ obj = context[token.split_contents()[1]] obj.set_short_url() return "" @register.simple_tag def gravatar_url(email, size=32): """ Return the full URL for a Gravatar given an email hash. """ bits = (md5(email.lower().encode("utf-8")).hexdigest(), size) return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits @register.to_end_tag def metablock(parsed): """ Remove HTML tags, entities and superfluous characters from meta blocks. """ parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",") return escape(strip_tags(decode_entities(parsed))) @register.inclusion_tag("includes/pagination.html", takes_context=True) def pagination_for(context, current_page, page_var="page", exclude_vars=""): """ Include the pagination template and data for persisting querystring in pagination links. Can also contain a comma separated string of var names in the current querystring to exclude from the pagination links, via the ``exclude_vars`` arg. """ querystring = context["request"].GET.copy() exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var] for exclude_var in exclude_vars: if exclude_var in querystring: del querystring[exclude_var] querystring = querystring.urlencode() return { "current_page": current_page, "querystring": querystring, "page_var": page_var, } @register.inclusion_tag("includes/search_form.html", takes_context=True) def search_form(context, search_model_names=None): """ Includes the search form with a list of models to use as choices for filtering the search by. Models should be a string with models in the format ``app_label.model_name`` separated by spaces. The string ``all`` can also be used, in which case the models defined by the ``SEARCH_MODEL_CHOICES`` setting will be used. 
""" if not search_model_names or not settings.SEARCH_MODEL_CHOICES: search_model_names = [] elif search_model_names == "all": search_model_names = list(settings.SEARCH_MODEL_CHOICES) else: search_model_names = search_model_names.split(" ") search_model_choices = [] for model_name in search_model_names: try: model = get_model(*model_name.split(".", 1)) except LookupError: pass else: verbose_name = model._meta.verbose_name_plural.capitalize() search_model_choices.append((verbose_name, model_name)) context["search_model_choices"] = sorted(search_model_choices) return context @register.simple_tag def thumbnail(image_url, width, height, quality=95, left=.5, top=.5, padding=False, padding_color="#fff"): """ Given the URL to an image, resizes the image using the given width and height on the first time it is requested, and returns the URL to the new resized image. if width or height are zero then original ratio is maintained. """ if not image_url: return "" try: from PIL import Image, ImageFile, ImageOps except ImportError: return "" image_url = unquote(str(image_url)).split("?")[0] if image_url.startswith(settings.MEDIA_URL): image_url = image_url.replace(settings.MEDIA_URL, "", 1) image_dir, image_name = os.path.split(image_url) image_prefix, image_ext = os.path.splitext(image_name) filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG") thumb_name = "%s-%sx%s" % (image_prefix, width, height) if left != .5 or top != .5: left = min(1, max(0, left)) top = min(1, max(0, top)) thumb_name = "%s-%sx%s" % (thumb_name, left, top) thumb_name += "-padded-%s" % padding_color if padding else "" thumb_name = "%s%s" % (thumb_name, image_ext) # `image_name` is used here for the directory path, as each image # requires its own sub-directory using its own name - this is so # we can consistently delete all thumbnails for an individual # image, which is something we do in filebrowser when a new image # is written, allowing us to purge any previously generated # thumbnails that may match a new image name. thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir, settings.THUMBNAILS_DIR_NAME, image_name) if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) thumb_path = os.path.join(thumb_dir, thumb_name) thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME, quote(image_name.encode("utf-8")), quote(thumb_name.encode("utf-8"))) image_url_path = os.path.dirname(image_url) if image_url_path: thumb_url = "%s/%s" % (image_url_path, thumb_url) try: thumb_exists = os.path.exists(thumb_path) except UnicodeEncodeError: # The image that was saved to a filesystem with utf-8 support, # but somehow the locale has changed and the filesystem does not # support utf-8. from mezzanine.core.exceptions import FileSystemEncodingChanged raise FileSystemEncodingChanged() if thumb_exists: # Thumbnail exists, don't generate it. return thumb_url elif not default_storage.exists(image_url): # Requested image does not exist, just return its URL. return image_url f = default_storage.open(image_url) try: image = Image.open(f) except: # Invalid image format. return image_url image_info = image.info to_width = int(width) to_height = int(height) from_width = image.size[0] from_height = image.size[1] # Set dimensions. if to_width == 0: to_width = from_width * to_height // from_height elif to_height == 0: to_height = from_height * to_width // from_width if image.mode not in ("P", "L", "RGBA"): try: image = image.convert("RGBA") except: return image_url # Required for progressive jpgs. 
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2) # Padding. if padding and to_width and to_height: from_ratio = float(from_width) / from_height to_ratio = float(to_width) / to_height pad_size = None if to_ratio < from_ratio: pad_height = int(to_height * (float(from_width) / to_width)) pad_size = (from_width, pad_height) pad_top = (pad_height - from_height) // 2 pad_left = 0 elif to_ratio > from_ratio: pad_width = int(to_width * (float(from_height) / to_height)) pad_size = (pad_width, from_height) pad_top = 0 pad_left = (pad_width - from_width) // 2 if pad_size is not None: pad_container = Image.new("RGBA", pad_size, padding_color) pad_container.paste(image, (pad_left, pad_top)) image = pad_container # Create the thumbnail. to_size = (to_width, to_height) to_pos = (left, top) try: image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos) image = image.save(thumb_path, filetype, quality=quality, **image_info) # Push a remote copy of the thumbnail if MEDIA_URL is # absolute. if "://" in settings.MEDIA_URL: with open(thumb_path, "rb") as f: default_storage.save(thumb_url, File(f)) except Exception: # If an error occurred, a corrupted image may have been saved, # so remove it, otherwise the check for it existing will just # return the corrupted image next time it's requested. try: os.remove(thumb_path) except Exception: pass return image_url return thumb_url @register.inclusion_tag("includes/editable_loader.html", takes_context=True) def editable_loader(context): """ Set up the required JS/CSS for the in-line editing toolbar and controls. """ user = context["request"].user context["has_site_permission"] = has_site_permission(user) if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]: t = get_template("includes/editable_toolbar.html") context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME try: context["editable_obj"] except KeyError: context["editable_obj"] = context.get("page", None) context["toolbar"] = t.render(Context(context)) context["richtext_media"] = RichTextField().formfield().widget.media return context @register.filter def richtext_filters(content): """ Takes a value edited via the WYSIWYG editor, and passes it through each of the functions specified by the RICHTEXT_FILTERS setting. """ filter_names = settings.RICHTEXT_FILTERS if not filter_names: try: filter_names = [settings.RICHTEXT_FILTER] except AttributeError: pass else: from warnings import warn warn("The `RICHTEXT_FILTER` setting is deprecated in favor of " "the new plural setting `RICHTEXT_FILTERS`.") for filter_name in filter_names: filter_func = import_dotted_path(filter_name) content = filter_func(content) return content @register.filter def richtext_filter(content): """ Deprecated version of richtext_filters above. """ from warnings import warn warn("The `richtext_filter` template tag is deprecated in favor of " "the new plural tag `richtext_filters`.") return richtext_filters(content) @register.to_end_tag def editable(parsed, context, token): """ Add the required HTML to the parsed content for in-line editing, such as the icon and edit form if the object is deemed to be editable - either it has an ``editable`` method which returns ``True``, or the logged in user has change permissions for the model. 
""" def parse_field(field): field = field.split(".") obj = context.get(field.pop(0), None) attr = field.pop() while field: obj = getattr(obj, field.pop(0)) if callable(obj): # Allows {% editable page.get_content_model.content %} obj = obj() return obj, attr fields = [parse_field(f) for f in token.split_contents()[1:]] if fields: fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]] if not parsed.strip(): try: parsed = "".join([str(getattr(*field)) for field in fields]) except AttributeError: pass if settings.INLINE_EDITING_ENABLED and fields and "request" in context: obj = fields[0][0] if isinstance(obj, Model) and is_editable(obj, context["request"]): field_names = ",".join([f[1] for f in fields]) context["editable_form"] = get_edit_form(obj, field_names) context["original"] = parsed t = get_template("includes/editable_form.html") return t.render(Context(context)) return parsed @register.simple_tag def try_url(url_name): """ Mimics Django's ``url`` template tag but fails silently. Used for url names in admin templates as these won't resolve when admin tests are running. """ from warnings import warn warn("try_url is deprecated, use the url tag with the 'as' arg instead.") try: url = reverse(url_name) except NoReverseMatch: return "" return url def admin_app_list(request): """ Adopted from ``django.contrib.admin.sites.AdminSite.index``. Returns a list of lists of models grouped and ordered according to ``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the ``admin_dropdown_menu`` template tag as well as the ``app_list`` dashboard widget. """ app_dict = {} # Model or view --> (group index, group title, item index, item title). menu_order = {} for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER): group_title, items = group group_title = group_title.title() for (item_index, item) in enumerate(items): if isinstance(item, (tuple, list)): item_title, item = item else: item_title = None menu_order[item] = (group_index, group_title, item_index, item_title) # Add all registered models, using group and title from menu order. for (model, model_admin) in admin.site._registry.items(): opts = model._meta in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu() if in_menu and request.user.has_module_perms(opts.app_label): perms = model_admin.get_model_perms(request) admin_url_name = "" if perms["change"]: admin_url_name = "changelist" change_url = admin_url(model, admin_url_name) else: change_url = None if perms["add"]: admin_url_name = "add" add_url = admin_url(model, admin_url_name) else: add_url = None if admin_url_name: model_label = "%s.%s" % (opts.app_label, opts.object_name) try: app_index, app_title, model_index, model_title = \ menu_order[model_label] except KeyError: app_index = None app_title = opts.app_label.title() model_index = None model_title = None else: del menu_order[model_label] if not model_title: model_title = capfirst(model._meta.verbose_name_plural) if app_title not in app_dict: app_dict[app_title] = { "index": app_index, "name": app_title, "models": [], } app_dict[app_title]["models"].append({ "index": model_index, "perms": model_admin.get_model_perms(request), "name": model_title, "admin_url": change_url, "add_url": add_url }) # Menu may also contain view or url pattern names given as (title, name). 
for (item_url, item) in menu_order.items(): app_index, app_title, item_index, item_title = item try: item_url = reverse(item_url) except NoReverseMatch: continue if app_title not in app_dict: app_dict[app_title] = { "index": app_index, "name": app_title, "models": [], } app_dict[app_title]["models"].append({ "index": item_index, "perms": {"custom": True}, "name": item_title, "admin_url": item_url, }) app_list = list(app_dict.values()) sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"]) for app in app_list: app["models"].sort(key=sort) app_list.sort(key=sort) return app_list @register.inclusion_tag("admin/includes/dropdown_menu.html", takes_context=True) def admin_dropdown_menu(context): """ Renders the app list for the admin dropdown menu navigation. """ user = context["request"].user if user.is_staff: context["dropdown_menu_app_list"] = admin_app_list(context["request"]) if user.is_superuser: sites = Site.objects.all() else: sites = user.sitepermissions.get().sites.all() context["dropdown_menu_sites"] = list(sites) context["dropdown_menu_selected_site_id"] = current_site_id() return context @register.inclusion_tag("admin/includes/app_list.html", takes_context=True) def app_list(context): """ Renders the app list for the admin dashboard widget. """ context["dashboard_app_list"] = admin_app_list(context["request"]) return context @register.inclusion_tag("admin/includes/recent_actions.html", takes_context=True) def recent_actions(context): """ Renders the recent actions list for the admin dashboard widget. """ return context @register.render_tag def dashboard_column(context, token): """ Takes an index for retrieving the sequence of template tags from ``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin dashboard. """ column_index = int(token.split_contents()[1]) output = [] for tag in settings.DASHBOARD_TAGS[column_index]: t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split("."))) output.append(t.render(Context(context))) return "".join(output) @register.simple_tag(takes_context=True) def translate_url(context, language): """ Translates the current URL for the given language code, eg: {% translate_url de %} """ try: request = context["request"] except KeyError: return "" view = resolve(request.path) current_language = translation.get_language() translation.activate(language) try: url = reverse(view.func, args=view.args, kwargs=view.kwargs) except NoReverseMatch: try: url_name = (view.url_name if not view.namespace else '%s:%s' % (view.namespace, view.url_name)) url = reverse(url_name, args=view.args, kwargs=view.kwargs) except NoReverseMatch: url_name = "admin:" + view.url_name url = reverse(url_name, args=view.args, kwargs=view.kwargs) translation.activate(current_language) if context['request'].META["QUERY_STRING"]: url += "?" + context['request'].META["QUERY_STRING"] return url
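

# --- Hedged usage sketch (illustrative addition, not part of mezzanine):
# most tags above need a template context, but ``gravatar_url`` is a plain
# function and can be exercised directly; the email address is made up.
def _example_gravatar_url():
    # Returns something like
    # "//www.gravatar.com/avatar/<md5-of-lowercased-email>?s=64&d=identicon&r=PG"
    return gravatar_url("editor@example.com", size=64)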
bsd-2-clause
290,233,697,215,057,200
34.435556
79
0.609641
false
ActiveState/code
recipes/Python/511508_Binomial_Queues/recipe-511508.py
1
4907
""" BinomialQueue.py Meldable priority queues Written by Gregoire Dooms and Irit Katriel """ class LinkError(Exception): pass class EmptyBinomialQueueError(Exception): pass class BinomialTree: "A single Binomial Tree" def __init__(self, value): "Create a one-node tree. value is the priority of this node" self.value = value self.rank = 0 self.children = [] def link(self, other_tree): """Make other_tree the son of self. Both trees must have the same rank, and other_tree must have a larger minimum priority """ if self.rank != other_tree.rank: raise LinkError() if self.value > other_tree.value: raise LinkError() self.children.append(other_tree) self.rank += 1 return True def str(self, indent = 0): return (" "*indent + "rank: %d value: %d"%(self.rank, self.value)+ "\n"+"".join(child.str(indent+2) for child in self.children) ) def __str__(self): return self.str() class BinomialQueue: """ A Meldable priority Queue """ def __init__(self,infinity=1e300): """ Create an empty Binomial Queue. Since a queue can hold any comparable data type, we need to know at initialization time what an "infinity" element looks like. """ self.infinity = infinity self.parent = self self.trees = [] self.elements = 0 self.min = self.infinity self.min_tree_rank = -1 def __capacity(self): return 2**len(self.trees) - 1 def __resize(self): while self.__capacity() < self.elements: self.trees.append(None) def __add_tree(self,new_tree): " Insert new_tree into self" self.elements = self.elements + 2**new_tree.rank self.__resize() while self.trees[new_tree.rank] is not None: if self.trees[new_tree.rank].value < new_tree.value: new_tree, self.trees[new_tree.rank] = \ self.trees[new_tree.rank], new_tree # swap r = new_tree.rank new_tree.link(self.trees[r]) self.trees[r] = None self.trees[new_tree.rank] = new_tree if new_tree.value <= self.min: self.min = new_tree.value self.min_tree_rank = new_tree.rank def meld(self, other_queue): "Insert all elements of other_queue into self " for tree in other_queue.trees: if tree is not None: self.__add_tree(tree) def insert(self, value): "Insert value into self " tree = BinomialTree(value) self.__add_tree(tree) def get_min(self): "Return the minimum element in self" return self.min def delete_min(self): "Delete the minumum element from self " if not self: raise EmptyBinomialQueueError() to_remove = self.trees[self.min_tree_rank] self.trees[to_remove.rank] = None self.elements = self.elements - 2**to_remove.rank for child in to_remove.children: self.__add_tree(child) self.min = self.infinity for tree in self.trees: if tree is not None: if tree.value <= self.min: self.min = tree.value self.min_tree_rank = tree.rank def __nonzero__(self): return self.elements def __str__(self): s = """elements: %d min: %s min_tree_rank: %d tree vector: """ % (self.elements, str(self.min), self.min_tree_rank) s += " ".join("10"[tree is None] for tree in self.trees) s += "\n" s += "".join(str(tree) for tree in self.trees if tree is not None) return s def __len__(self): return self.elements def __iadd__(self,other): if type(other) == type(self): self.meld(other) else: self.insert(other) return self def run_test(): inf = 2e300 N = 10 Q1 = BinomialQueue(inf) Q2 = BinomialQueue(inf) print Q1 print "-------------------------------------------" Q1 += 20 # Same as Q1.insert(20) Q1.insert(1) Q1.insert(5) Q1.insert(10) print Q1 print "-------------------------------------------" Q2.insert(2) Q2.insert(22) Q2.insert(12) print Q2 print "-------------------------------------------" Q1 += Q2 # Same as Q1.meld(Q2) print Q1 print 
"-------------------------------------------" while Q1: print "Q1.min = ", Q1.min Q1.delete_min() if __name__ == "__main__": run_test()
mit
-1,932,582,368,276,943,000
27.864706
77
0.509069
false
1000ideas/sublime_redmine
Redmine.py
1
6831
import re import json import functools import urllib, urllib2 import sublime, sublime_plugin, threading import webbrowser class RedmineError(Exception): pass def main_thread(callback, *args, **kwargs): # sublime.set_timeout gets used to send things onto the main thread # most sublime.[something] calls need to be on the main thread sublime.set_timeout(functools.partial(callback, *args, **kwargs), 0) def open_in_browser(url, browser = None): if not re.search("^https?://", url): url = "http://" + url try: print browser webbrowser.get(browser).open_new_tab(url) except webbrowser.Error: sublime.error_message("[Redmine] Invalid browser command") class RedmineAPIThread(threading.Thread): def __init__(self, method, path, callback = None, data={}, host = '', apikey = ''): if re.search("^https?://", host): self.host = host else: self.host = "http://" + host self.key = apikey self.method = method self.path = path self.data = data self.callback = callback threading.Thread.__init__(self) def run(self): h = { "X-Redmine-API-Key": self.key, "Content-Type": 'application/json' } try: opener = urllib2.build_opener(urllib2.HTTPHandler) if self.method == "GET": url = "%s/%s.json?%s" % (self.host, self.path, urllib.urlencode(self.data)) _data = None else: url = "%s/%s.json" % (self.host, self.path) _data = json.dumps(self.data) #if self.data != None else None print "[%s] %s" %(self.method, url) req = urllib2.Request(url, _data, headers= h) req.get_method = lambda: self.method http_file = urllib2.urlopen(req) main_thread(self.callback, http_file.read().decode('utf-8')) except urllib2.HTTPError as e: main_thread(sublime.error_message, "[Redmine] %s (%s)" % (e, url)) except urllib2.URLError as e: main_thread(sublime.error_message, "[Redmine] URLError: %s" % (e)) class RedmineCommand(sublime_plugin.WindowCommand): def api_call(self, path, data={}, method="GET", callback=None): try: s = sublime.load_settings("Redmine.sublime-settings") host = s.get('host') if len(host) == 0: raise RedmineError("Invalid host name") apikey = s.get('apikey') if len(apikey) == 0: raise RedmineError("Invalid host name") thread = RedmineAPIThread(method, path, callback or self.generic_callback, data, host, apikey) thread.start() except RedmineError as ex: sublime.error_message("[Redmine] %s" % ex) def generic_callback(self, output): pass def quick_panel(self, *args, **kwargs): self.window.show_quick_panel(*args, **kwargs) class ListRedmineStatusesCommand(RedmineCommand): def __init__(self, window): self.statuses = [] RedmineCommand.__init__(self, window) def run(self): if len(self.statuses) == 0: self.api_call('issue_statuses') else: self.select_status() def generic_callback(self, output): jout = json.loads(output) self.statuses = jout['issue_statuses'] self.select_status() def select_status(self): self.quick_panel([s['name'] for s in self.statuses], self.status_selected) def status_selected(self, idx): if idx >= 0: sublime.status_message("Selected status: %s" % (self.statuses[idx]['name'])) class ListRedmineIssuesCommand(RedmineCommand): def run(self, issue_filter = {}): issue_filter.update({'sort': 'id:desc'}) self.api_call('issues', issue_filter) def generic_callback(self, output): jout = json.loads(output) self.issues = jout['issues'] self.quick_panel(["#%d: [%s] %s" % (i["id"], i["project"]["name"], i["subject"]) for i in self.issues], self.select_issue) def select_issue(self, idx): if idx >= 0: issue_id = self.issues[idx]['id'] s = sublime.load_settings("Redmine.sublime-settings") host = s.get('host') browser = s.get('browser') if 
not isinstance(browser, basestring): browser = None else: browser = str(browser) open_in_browser( "%s/issues/%s" % (host, issue_id), browser ) class UpdateRedmineStatusCommand(ListRedmineStatusesCommand): def run(self, issue_id): if issue_id == None: return self.issue_id = issue_id ListRedmineStatusesCommand.run(self) def status_selected(self, idx): if idx >= 0 and self.issue_id != None: self.status = self.statuses[idx] self.api_call( 'issues/%s' % self.issue_id, {'issue' : {'status_id': self.status['id']}}, 'PUT', self.update_response ) def update_response(self, output): sublime.status_message("Status of #%s changed to %s" %(self.issue_id, self.status['name'])) class UpdateRedmineIssuesCommand(ListRedmineIssuesCommand): def select_issue(self, idx): if idx >= 0: issue_id = self.issues[idx]['id'] self.window.run_command('update_redmine_status', {'issue_id': issue_id}) class StartRedmineIssuesCommand(ListRedmineIssuesCommand): def generic_callback(self, output): jout = json.loads(output) self.issues = filter(lambda i: not i.get('play', False), jout['issues']) if len(self.issues) > 0: self.quick_panel(["#%d: [%s] %s" % (i["id"], i["project"]["name"], i["subject"]) for i in self.issues], self.select_issue) else: sublime.status_message("No issues to start!") def select_issue(self, idx): if idx >= 0: self.issue_id = self.issues[idx]['id'] self.api_call( 'issues/%s/start_time' % self.issue_id, None, 'POST', self.started_response ) def started_response(self, output): jout = json.loads(output) if jout.get('success', False): sublime.status_message("Time tracking for #%s started." % (self.issue_id) ) else: sublime.error_message("[Redmine] Error occured!") class StopRedmineIssuesCommand(ListRedmineIssuesCommand): def generic_callback(self, output): jout = json.loads(output) self.issues = filter(lambda i: i.get('play', False), jout['issues']) if len(self.issues) > 0: self.quick_panel(["#%d: [%s] %s" % (i["id"], i["project"]["name"], i["subject"]) for i in self.issues], self.select_issue) else: sublime.status_message("No started issues!") def select_issue(self, idx): if idx >= 0: self.issue_id = self.issues[idx]['id'] self.api_call( 'issues/%s/stop_time' % self.issue_id, None, 'POST', self.stoped_response ) def stoped_response(self, output): jout = json.loads(output) if jout.get('success', False): sublime.status_message("Time tracking for #%s stoped. Time spend: %.2fh." % (self.issue_id, jout.get('time', 0.0))) else: sublime.error_message("[Redmine] Error occured!")
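

# --- Hedged usage note (illustrative addition, not part of the plugin):
# Sublime exposes the window commands above under their snake_case names, so
# they can be triggered from the console or from another plugin, e.g.:
#   window.run_command("list_redmine_issues")
#   window.run_command("update_redmine_status", {"issue_id": 1234})
# (1234 is a placeholder issue id; the second call mirrors how
# UpdateRedmineIssuesCommand itself dispatches to update_redmine_status.)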
mit
-6,297,098,791,352,698,000
31.528571
128
0.631386
false
ekansa/open-context-py
opencontext_py/apps/about/views.py
1
20142
import json from django.conf import settings from django.http import HttpResponse, Http404 from django.template import RequestContext, loader from opencontext_py.libs.general import LastUpdatedOrderedDict from opencontext_py.libs.rootpath import RootPath from opencontext_py.libs.requestnegotiation import RequestNegotiation from opencontext_py.apps.about.estimator import CostEstimator from django.views.decorators.csrf import ensure_csrf_cookie from django.views.decorators.cache import cache_control from django.views.decorators.cache import never_cache # @cache_control(no_cache=True) # @never_cache def index_view(request): """ Get the search context JSON-LD """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Video and introduction to Open Context, an open-access ' 'data publication service for archaeology ', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': 'https://opencontext.wistia.com/medias/s0g0fsyqkz' } template = loader.get_template('about/index.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About', 'act_nav': 'about', 'og': open_graph, 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def uses_view(request): """ Get uses page """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support template = loader.get_template('about/uses.html') open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/uses', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Summary of how to use Open Context for sharing, '\ 'preserving, exploring and analyzing archaeological '\ 'research data', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } context = { 'base_url': base_url, 'page_title': 'Open Context: About - Uses', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def pub_view(request): """ Get publishing overview page """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/publishing', 'site_name': settings.CANONICAL_SITENAME, 'description': 'How to publish archaeological research data '\ 'with Open Context', 'image': base_url + 
'/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/publishing.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Publishing', 'act_nav': 'about', 'og': open_graph, 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) # @cache_control(no_cache=True) # @never_cache def people_view(request): """ Get people page """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/people', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Data editors, software developers, designers '\ 'and alumni with Open Context research data '\ 'publishing services', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/people.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - People', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @ensure_csrf_cookie # @cache_control(no_cache=True) # @never_cache def estimate_view(request): """ Get page with publication project cost estimation """ rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/estimate', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Estimate data publication and archiving '\ 'costs with Open Context to help budget for '\ 'grant data management plans', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/estimate.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Cost Estimate', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def process_estimate(request): """ process an estimate """ if request.method == 'POST': cost = CostEstimator() output = cost.process_estimate(request.POST) json_output = json.dumps(output, indent=4, ensure_ascii=False) return HttpResponse(json_output, content_type='application/json; charset=utf8') elif request.method == 'GET': cost = CostEstimator() output = cost.process_estimate(request.GET) json_output = json.dumps(output, indent=4, ensure_ascii=False) return HttpResponse(json_output, content_type='application/json; charset=utf8') else: return HttpResponseForbidden def concepts_view(request): """ Get concepts overview """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 
'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support template = loader.get_template('about/temp.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Concepts', 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def tech_view(request): """ Show technology page """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/technology', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Overview of the open-source software technologies '\ 'created and used by Open Context to publish '\ 'archaeological data on the Web', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/technology.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Technology', 'act_nav': 'about', 'og': open_graph, 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) def services_view(request): """ Get page documenting the API """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/technology', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Overview of the APIs (machine-readable data) '\ 'offered by Open Context to promote '\ 'interoperability and new uses of data', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/services.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Web Services and APIs', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) def recipes_view(request): """ Get page about recipes using the API """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/recipes', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Specific guidance on the use of Open Context APIs '\ '(machine-readable data) to meet certain data '\ 'management needs', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = 
loader.get_template('about/recipes.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - API Cookbook', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def bibliography_view(request): """ Get page about bibliography / publications """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/bibliography', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Publications related to Open Context and its '\ 'contributions to research data management, '\ 'archaeological ethics, scholarly communications, and '\ 'professional practice', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/bibliography.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Bibliography', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def ip_view(request): """ Get page about IP policies """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/intellectual-property', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Intellectual property policies for Open Context and '\ 'ethical guidance for contributors and users of '\ 'archaeological research data', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/intellectual-property.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Intellectual Property', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) @cache_control(no_cache=True) @never_cache def sponsors_view(request): """ Get the page about sponsors """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/sponsors', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Sources of financial support for '\ 'Open Context and collaborative institutions providing '\ 'complementary services', 'image': base_url + 
'/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/sponsors.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Intellectual Property', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context, request)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415) # @cache_control(no_cache=True) # @never_cache def terms_view(request): """ Get the page about Terms """ request = RequestNegotiation().anonymize_request(request) rp = RootPath() base_url = rp.get_baseurl() req_neg = RequestNegotiation('text/html') if 'HTTP_ACCEPT' in request.META: req_neg.check_request_support(request.META['HTTP_ACCEPT']) if req_neg.supported: # requester wanted a mimetype we DO support open_graph = { 'twitter_site': settings.TWITTER_SITE, 'type': 'website', 'url': base_url + '/about/terms', 'site_name': settings.CANONICAL_SITENAME, 'description': 'Terms and Conditions of Use, and '\ 'Privacy Policies for Open Context', 'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png', 'video': False } template = loader.get_template('about/terms.html') context = { 'base_url': base_url, 'page_title': 'Open Context: About - Terms of Use and Privacy Policies', 'og': open_graph, 'act_nav': 'about', 'nav_items': settings.NAV_ITEMS } return HttpResponse(template.render(context)) else: # client wanted a mimetype we don't support return HttpResponse(req_neg.error_message, status=415)
gpl-3.0
8,155,570,767,275,034,000
38.887129
84
0.578642
false
polyaxon/polyaxon
core/polyaxon/utils/string_utils.py
1
2468
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import re
import unicodedata

from decimal import Decimal
from typing import Callable


def strip_spaces(value, sep=None, join=True):
    """Cleans trailing whitespaces and replaces also multiple whitespaces with a single space."""
    value = value.strip()
    value = [v.strip() for v in value.split(sep)]
    join_sep = sep or " "
    return join_sep.join(value) if join else value


def is_protected_type(obj):
    """
    A check for preserving a type as-is when passed to force_text(strings_only=True).
    """
    return isinstance(
        obj,
        (
            type(None),
            int,
            float,
            Decimal,
            datetime.datetime,
            datetime.date,
            datetime.time,
        ),
    )


def force_bytes(value, encoding="utf-8", strings_only=False, errors="strict"):
    """
    Resolve any value to strings.

    If `strings_only` is True, skip protected objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(value, bytes):
        if encoding == "utf-8":
            return value
        return value.decode("utf-8", errors).encode(encoding, errors)
    if strings_only and is_protected_type(value):
        return value
    if isinstance(value, memoryview):
        return bytes(value)
    return value.encode(encoding, errors)


def slugify(value: str, mark_safe: Callable = None) -> str:
    """
    Convert spaces/dots to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Also strip leading and trailing whitespace.
    """
    value = str(value)
    value = (
        unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii")
    )
    value = re.sub(r"[^\w\.\s-]", "", value).strip()
    value = re.sub(r"[-\.\s]+", "-", value)
    return mark_safe(value) if mark_safe else value
apache-2.0
7,351,634,692,586,380,000
29.469136
97
0.651945
false
Azure/azure-sdk-for-python
sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/operations/_os_updates_operations.py
1
9891
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class OSUpdatesOperations(object): """OSUpdatesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~test_base.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name, # type: str test_base_account_name, # type: str package_name, # type: str os_update_type, # type: Union[str, "_models.OsUpdateType"] **kwargs # type: Any ): # type: (...) -> Iterable["_models.OSUpdateListResult"] """Lists the OS Updates in which the package were tested before. :param resource_group_name: The name of the resource group that contains the resource. :type resource_group_name: str :param test_base_account_name: The resource name of the Test Base Account. :type test_base_account_name: str :param package_name: The resource name of the Test Base Package. :type package_name: str :param os_update_type: The type of the OS Update. 
:type os_update_type: str or ~test_base.models.OsUpdateType :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either OSUpdateListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~test_base.models.OSUpdateListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.OSUpdateListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-12-16-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'), 'packageName': self._serialize.url("package_name", package_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['osUpdateType'] = self._serialize.query("os_update_type", os_update_type, 'str') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('OSUpdateListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/osUpdates'} # type: ignore def get( self, resource_group_name, # type: str test_base_account_name, # type: str package_name, # type: str os_update_resource_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.OSUpdateResource" """Gets an OS Update by name in which the package was tested before. :param resource_group_name: The name of the resource group that contains the resource. :type resource_group_name: str :param test_base_account_name: The resource name of the Test Base Account. :type test_base_account_name: str :param package_name: The resource name of the Test Base Package. :type package_name: str :param os_update_resource_name: The resource name of an OS Update. 
:type os_update_resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: OSUpdateResource, or the result of cls(response) :rtype: ~test_base.models.OSUpdateResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.OSUpdateResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-12-16-preview" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'), 'packageName': self._serialize.url("package_name", package_name, 'str'), 'osUpdateResourceName': self._serialize.url("os_update_resource_name", os_update_resource_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('OSUpdateResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/osUpdates/{osUpdateResourceName}'} # type: ignore
mit
2,570,028,533,368,190,500
48.954545
236
0.645233
false
squidboylan/apt-package-mirror
apt_package_mirror/__main__.py
1
2647
from __future__ import print_function

import yaml
import os
from apt_package_mirror.mirror import Mirror
import sys
import argparse


def main():
    # When files are created make them with a 022 umask
    os.umask(022)

    # Add commandline options and help text for them
    parser = argparse.ArgumentParser()
    parser.add_argument('-U', '--update-packages-only',
                        dest='update_packages_only', action='store_true',
                        default=False, help='Grab new packages only')

    config_file_help = ('yaml config file that describes what mirror to copy '
                        'and where to store the data')
    parser.add_argument(
        'config_file', default='config.yaml', nargs='?',
        help=config_file_help
    )

    args = parser.parse_args()

    # Check if the config file exists, if it doesnt fail with a message
    try:
        with open(args.config_file, "r") as file_stream:
            config = yaml.load(file_stream)
    except:
        print("failed to load the config file")
        sys.exit(1)

    # Check if the mirror path defined in the config file exists
    mirror_path = config['mirror_path']
    if not os.path.exists(mirror_path):
        print("Mirror path does not exist, please fix it")
        sys.exit(1)

    # Check if the directory for temp files is defined
    try:
        temp_indices = config['temp_files_path']
    except:
        temp_indices = None

    # Check if a log_level is defined
    try:
        log_level = config['log_level']
    except:
        log_level = None

    # Check if a package_ttl is defined
    try:
        package_ttl = config['package_ttl']
    except:
        package_ttl = None

    # Check if a hash_function is defined
    try:
        hash_function = config['hash_function']
    except:
        hash_function = None

    # Create a file for logging in the location defined by the config file
    try:
        log_file = config['log_file']
        f = open(log_file, 'a')
        f.close()
    except:
        log_file = None

    mirror = Mirror(mirror_path=mirror_path, mirror_url=config['mirror_url'],
                    temp_indices=temp_indices, log_file=log_file,
                    log_level=log_level, package_ttl=package_ttl,
                    hash_function=hash_function)

    # If a -U option is used, only update the 'pool' directory. This only grabs
    # new packages
    if args.update_packages_only:
        mirror.update_pool()

    # If a -U option is not used, attempt to update the whole mirror
    else:
        mirror.sync()


if __name__ == '__main__':
    main()
apache-2.0
4,230,655,349,344,754,000
27.771739
79
0.59728
false
ruddra/django-oscar
oscar/apps/basket/abstract_models.py
1
28617
from decimal import Decimal import zlib from django.db import models from django.db.models import Sum from django.conf import settings from django.utils.timezone import now from django.utils.translation import ugettext_lazy as _ from django.core.exceptions import ObjectDoesNotExist, PermissionDenied from oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager from oscar.apps.offer import results from oscar.core.compat import AUTH_USER_MODEL from oscar.templatetags.currency_filters import currency class AbstractBasket(models.Model): """ Basket object """ # Baskets can be anonymously owned - hence this field is nullable. When a # anon user signs in, their two baskets are merged. owner = models.ForeignKey( AUTH_USER_MODEL, related_name='baskets', null=True, verbose_name=_("Owner")) # Basket statuses # - Frozen is for when a basket is in the process of being submitted # and we need to prevent any changes to it. OPEN, MERGED, SAVED, FROZEN, SUBMITTED = ( "Open", "Merged", "Saved", "Frozen", "Submitted") STATUS_CHOICES = ( (OPEN, _("Open - currently active")), (MERGED, _("Merged - superceded by another basket")), (SAVED, _("Saved - for items to be purchased later")), (FROZEN, _("Frozen - the basket cannot be modified")), (SUBMITTED, _("Submitted - has been ordered at the checkout")), ) status = models.CharField( _("Status"), max_length=128, default=OPEN, choices=STATUS_CHOICES) # A basket can have many vouchers attached to it. However, it is common # for sites to only allow one voucher per basket - this will need to be # enforced in the project's codebase. vouchers = models.ManyToManyField( 'voucher.Voucher', null=True, verbose_name=_("Vouchers"), blank=True) date_created = models.DateTimeField(_("Date created"), auto_now_add=True) date_merged = models.DateTimeField(_("Date merged"), null=True, blank=True) date_submitted = models.DateTimeField(_("Date submitted"), null=True, blank=True) # Only if a basket is in one of these statuses can it be edited editable_statuses = (OPEN, SAVED) class Meta: abstract = True verbose_name = _('Basket') verbose_name_plural = _('Baskets') objects = models.Manager() open = OpenBasketManager() saved = SavedBasketManager() def __init__(self, *args, **kwargs): super(AbstractBasket, self).__init__(*args, **kwargs) # We keep a cached copy of the basket lines as we refer to them often # within the same request cycle. Also, applying offers will append # discount data to the basket lines which isn't persisted to the DB and # so we want to avoid reloading them as this would drop the discount # information. self._lines = None self.offer_applications = results.OfferApplications() def __unicode__(self): return _( u"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)") % { 'status': self.status, 'owner': self.owner, 'num_lines': self.num_lines} # ======== # Strategy # ======== @property def has_strategy(self): return hasattr(self, '_strategy') def _get_strategy(self): if not self.has_strategy: raise RuntimeError( "No strategy class has been assigned to this basket. " "This is normally assigned to the incoming request in " "oscar.apps.basket.middleware.BasketMiddleware. " "Since it is missing, you must be doing something different. " "Ensure that a strategy instance is assigned to the basket!" ) return self._strategy def _set_strategy(self, strategy): self._strategy = strategy strategy = property(_get_strategy, _set_strategy) def all_lines(self): """ Return a cached set of basket lines. 
This is important for offers as they alter the line models and you don't want to reload them from the DB as that information would be lost. """ if self.id is None: return self.lines.none() if self._lines is None: self._lines = self.lines.select_related( 'product', 'product__stockrecord' ).all().prefetch_related('attributes', 'product__images') # Assign strategy to each line so it can use it to determine # prices. This is only needed for Django 1.4.5, where accessing # self.basket from within the line will create a new basket # instance (with no strategy assigned). In later version, the # original basket instance is cached and keeps its strategy # property. for line in self._lines: line.strategy = self.strategy return self._lines def is_quantity_allowed(self, qty): """ Test whether the passed quantity of items can be added to the basket """ # We enfore a max threshold to prevent a DOS attack via the offers # system. basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD if basket_threshold: total_basket_quantity = self.num_items max_allowed = basket_threshold - total_basket_quantity if qty > max_allowed: return False, _( "Due to technical limitations we are not able " "to ship more than %(threshold)d items in one order.") % { 'threshold': basket_threshold, } return True, None # ============ # Manipulation # ============ def flush(self): """ Remove all lines from basket. """ if self.status == self.FROZEN: raise PermissionDenied("A frozen basket cannot be flushed") self.lines.all().delete() self._lines = None def add_product(self, product, quantity=1, options=None): """ Add a product to the basket 'stock_info' is the price and availability data returned from a partner strategy class. The 'options' list should contains dicts with keys 'option' and 'value' which link the relevant product.Option model and string value respectively. """ if options is None: options = [] if not self.id: self.save() # Ensure that all lines are the same currency price_currency = self.currency stock_info = self.strategy.fetch(product) if price_currency and stock_info.price.currency != price_currency: raise ValueError(( "Basket lines must all have the same currency. Proposed " "line has currency %s, while basket has currency %s") % ( stock_info.price.currency, price_currency)) if stock_info.stockrecord is None: raise ValueError(( "Basket lines must all have stock records. Strategy hasn't " "found any stock record for product %s") % product) # Line reference is used to distinguish between variations of the same # product (eg T-shirts with different personalisations) line_ref = self._create_line_reference( product, stock_info.stockrecord, options) # Determine price to store (if one exists). It is only stored for # audit and sometimes caching. defaults = { 'quantity': quantity, 'price_excl_tax': stock_info.price.excl_tax, 'price_currency': stock_info.price.currency, } if stock_info.price.is_tax_known: defaults['price_incl_tax'] = stock_info.price.incl_tax line, created = self.lines.get_or_create( line_reference=line_ref, product=product, stockrecord=stock_info.stockrecord, defaults=defaults) if created: for option_dict in options: line.attributes.create(option=option_dict['option'], value=option_dict['value']) else: line.quantity += quantity line.save() self.reset_offer_applications() add_product.alters_data = True add = add_product def applied_offers(self): """ Return a dict of offers successfully applied to the basket. 
This is used to compare offers before and after a basket change to see if there is a difference. """ return self.offer_applications.offers def reset_offer_applications(self): """ Remove any discounts so they get recalculated """ self.offer_applications = results.OfferApplications() self._lines = None def merge_line(self, line, add_quantities=True): """ For transferring a line from another basket to this one. This is used with the "Saved" basket functionality. """ try: existing_line = self.lines.get(line_reference=line.line_reference) except ObjectDoesNotExist: # Line does not already exist - reassign its basket line.basket = self line.save() else: # Line already exists - assume the max quantity is correct and # delete the old if add_quantities: existing_line.quantity += line.quantity else: existing_line.quantity = max(existing_line.quantity, line.quantity) existing_line.save() line.delete() finally: self._lines = None merge_line.alters_data = True def merge(self, basket, add_quantities=True): """ Merges another basket with this one. :basket: The basket to merge into this one. :add_quantities: Whether to add line quantities when they are merged. """ # Use basket.lines.all instead of all_lines as this function is called # before a strategy has been assigned. for line_to_merge in basket.lines.all(): self.merge_line(line_to_merge, add_quantities) basket.status = self.MERGED basket.date_merged = now() basket._lines = None basket.save() merge.alters_data = True def freeze(self): """ Freezes the basket so it cannot be modified. """ self.status = self.FROZEN self.save() freeze.alters_data = True def thaw(self): """ Unfreezes a basket so it can be modified again """ self.status = self.OPEN self.save() thaw.alters_data = True def submit(self): """ Mark this basket as submitted """ self.status = self.SUBMITTED self.date_submitted = now() self.save() submit.alters_data = True # Kept for backwards compatibility set_as_submitted = submit def is_shipping_required(self): """ Test whether the basket contains physical products that require shipping. """ for line in self.all_lines(): if line.product.is_shipping_required: return True return False # ======= # Helpers # ======= def _create_line_reference(self, product, stockrecord, options): """ Returns a reference string for a line based on the item and its options. """ base = '%s_%s' % (product.id, stockrecord.id) if not options: return base return "%s_%s" % (base, zlib.crc32(str(options))) def _get_total(self, property): """ For executing a named method on each line of the basket and returning the total. 
""" total = Decimal('0.00') for line in self.all_lines(): try: total += getattr(line, property) except ObjectDoesNotExist: # Handle situation where the product may have been deleted pass return total # ========== # Properties # ========== @property def is_empty(self): """ Test if this basket is empty """ return self.id is None or self.num_lines == 0 @property def is_tax_known(self): """ Test if tax values are known for this basket """ return all([line.is_tax_known for line in self.all_lines()]) @property def total_excl_tax(self): """ Return total line price excluding tax """ return self._get_total('line_price_excl_tax_incl_discounts') @property def total_tax(self): """Return total tax for a line""" return self._get_total('line_tax') @property def total_incl_tax(self): """ Return total price inclusive of tax and discounts """ return self._get_total('line_price_incl_tax_incl_discounts') @property def total_incl_tax_excl_discounts(self): """ Return total price inclusive of tax but exclusive discounts """ return self._get_total('line_price_incl_tax') @property def total_discount(self): return self._get_total('discount_value') @property def offer_discounts(self): """ Return basket discounts from non-voucher sources. Does not include shipping discounts. """ return self.offer_applications.offer_discounts @property def voucher_discounts(self): """ Return discounts from vouchers """ return self.offer_applications.voucher_discounts @property def shipping_discounts(self): """ Return discounts from vouchers """ return self.offer_applications.shipping_discounts @property def post_order_actions(self): """ Return discounts from vouchers """ return self.offer_applications.post_order_actions @property def grouped_voucher_discounts(self): """ Return discounts from vouchers but grouped so that a voucher which links to multiple offers is aggregated into one object. """ return self.offer_applications.grouped_voucher_discounts @property def total_excl_tax_excl_discounts(self): """ Return total price excluding tax and discounts """ return self._get_total('line_price_excl_tax') @property def num_lines(self): """Return number of lines""" return self.lines.all().count() @property def num_items(self): """Return number of items""" return reduce( lambda num, line: num + line.quantity, self.lines.all(), 0) @property def num_items_without_discount(self): num = 0 for line in self.all_lines(): num += line.quantity_without_discount return num @property def num_items_with_discount(self): num = 0 for line in self.all_lines(): num += line.quantity_with_discount return num @property def time_before_submit(self): if not self.date_submitted: return None return self.date_submitted - self.date_created @property def time_since_creation(self, test_datetime=None): if not test_datetime: test_datetime = now() return test_datetime - self.date_created @property def contains_a_voucher(self): if not self.id: return False return self.vouchers.all().count() > 0 @property def is_submitted(self): return self.status == self.SUBMITTED @property def can_be_edited(self): """ Test if a basket can be edited """ return self.status in self.editable_statuses @property def currency(self): # Since all lines should have the same currency, return the currency of # the first one found. 
for line in self.all_lines(): return line.price_currency # ============= # Query methods # ============= def contains_voucher(self, code): """ Test whether the basket contains a voucher with a given code """ if self.id is None: return False try: self.vouchers.get(code=code) except ObjectDoesNotExist: return False else: return True def product_quantity(self, product): """ Return the quantity of a product in the basket The basket can contain multiple lines with the same product, but different options and stockrecords. Those quantities are summed up. """ matching_lines = self.lines.filter(product=product) quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum'] return quantity or 0 def line_quantity(self, product, stockrecord, options=None): """ Return the current quantity of a specific product and options """ ref = self._create_line_reference(product, stockrecord, options) try: return self.lines.get(line_reference=ref).quantity except ObjectDoesNotExist: return 0 class AbstractLine(models.Model): """ A line of a basket (product and a quantity) """ basket = models.ForeignKey('basket.Basket', related_name='lines', verbose_name=_("Basket")) # This is to determine which products belong to the same line # We can't just use product.id as you can have customised products # which should be treated as separate lines. Set as a # SlugField as it is included in the path for certain views. line_reference = models.SlugField( _("Line Reference"), max_length=128, db_index=True) product = models.ForeignKey( 'catalogue.Product', related_name='basket_lines', verbose_name=_("Product")) # We store the stockrecord that should be used to fulfil this line. This # shouldn't really be NULLable but we need to keep it so for backwards # compatibility. stockrecord = models.ForeignKey( 'partner.StockRecord', related_name='basket_lines', null=True, blank=True) quantity = models.PositiveIntegerField(_('Quantity'), default=1) # We store the unit price incl tax of the product when it is first added to # the basket. This allows us to tell if a product has changed price since # a person first added it to their basket. price_currency = models.CharField( _("Currency"), max_length=12, default=settings.OSCAR_DEFAULT_CURRENCY) price_excl_tax = models.DecimalField( _('Price excl. Tax'), decimal_places=2, max_digits=12, null=True) price_incl_tax = models.DecimalField( _('Price incl. 
Tax'), decimal_places=2, max_digits=12, null=True) # Track date of first addition date_created = models.DateTimeField(_("Date Created"), auto_now_add=True) def __init__(self, *args, **kwargs): super(AbstractLine, self).__init__(*args, **kwargs) # Instance variables used to persist discount information self._discount_excl_tax = Decimal('0.00') self._discount_incl_tax = Decimal('0.00') self._affected_quantity = 0 class Meta: abstract = True unique_together = ("basket", "line_reference") verbose_name = _('Basket line') verbose_name_plural = _('Basket lines') def __unicode__(self): return _( u"Basket #%(basket_id)d, Product #%(product_id)d, quantity %(quantity)d") % { 'basket_id': self.basket.pk, 'product_id': self.product.pk, 'quantity': self.quantity} def save(self, *args, **kwargs): """ Saves a line or deletes if the quantity is 0 """ if not self.basket.can_be_edited: raise PermissionDenied( _("You cannot modify a %s basket") % ( self.basket.status.lower(),)) if self.quantity == 0: return self.delete(*args, **kwargs) return super(AbstractLine, self).save(*args, **kwargs) # ============= # Offer methods # ============= def clear_discount(self): """ Remove any discounts from this line. """ self._discount_excl_tax = Decimal('0.00') self._discount_incl_tax = Decimal('0.00') self._affected_quantity = 0 def discount(self, discount_value, affected_quantity, incl_tax=True): """ Apply a discount to this line Note that it only makes sense to apply """ if incl_tax: if self._discount_excl_tax > 0: raise RuntimeError( "Attempting to discount the tax-inclusive price of a line " "when tax-exclusive discounts are already applied") self._discount_incl_tax += discount_value else: if self._discount_incl_tax > 0: raise RuntimeError( "Attempting to discount the tax-exclusive price of a line " "when tax-inclusive discounts are already applied") self._discount_excl_tax += discount_value self._affected_quantity += int(affected_quantity) def consume(self, quantity): """ Mark all or part of the line as 'consumed' Consumed items are no longer available to be used in offers. """ if quantity > self.quantity - self._affected_quantity: inc = self.quantity - self._affected_quantity else: inc = quantity self._affected_quantity += int(inc) def get_price_breakdown(self): """ Return a breakdown of line prices after discounts have been applied. Returns a list of (unit_price_incl_tx, unit_price_excl_tax, quantity) tuples. """ if not self.is_tax_known: raise RuntimeError("A price breakdown can only be determined " "when taxes are known") prices = [] if not self.has_discount: prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax, self.quantity)) else: # Need to split the discount among the affected quantity # of products. 
item_incl_tax_discount = ( self.discount_value / int(self._affected_quantity)) item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio prices.append((self.unit_price_incl_tax - item_incl_tax_discount, self.unit_price_excl_tax - item_excl_tax_discount, self._affected_quantity)) if self.quantity_without_discount: prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax, self.quantity_without_discount)) return prices # ======= # Helpers # ======= @property def _tax_ratio(self): if not self.unit_price_incl_tax: return 0 return self.unit_price_excl_tax / self.unit_price_incl_tax # ========== # Properties # ========== @property def has_discount(self): return self.quantity > self.quantity_without_discount @property def quantity_with_discount(self): return self._affected_quantity @property def quantity_without_discount(self): return int(self.quantity - self._affected_quantity) @property def is_available_for_discount(self): return self.quantity_without_discount > 0 @property def discount_value(self): # Only one of the incl- and excl- discounts should be non-zero return max(self._discount_incl_tax, self._discount_excl_tax) @property def stockinfo(self): """ Return the stock/price info """ if not hasattr(self, '_info'): # Cache the stockinfo (note that a strategy instance is assigned to # each line by the basket in the all_lines method). self._info = self.strategy.fetch( self.product, self.stockrecord) return self._info @property def is_tax_known(self): if not hasattr(self, 'strategy'): return False return self.stockinfo.price.is_tax_known @property def unit_price_excl_tax(self): return self.stockinfo.price.excl_tax @property def unit_price_incl_tax(self): return self.stockinfo.price.incl_tax @property def unit_tax(self): return self.stockinfo.price.tax @property def line_price_excl_tax(self): return self.quantity * self.unit_price_excl_tax @property def line_price_excl_tax_incl_discounts(self): if self._discount_excl_tax: return self.line_price_excl_tax - self._discount_excl_tax if self._discount_incl_tax: # This is a tricky situation. We know the discount as calculated # against tax inclusive prices but we need to guess how much of the # discount applies to tax-exclusive prices. We do this by # assuming a linear tax and scaling down the original discount. return self.line_price_excl_tax - self._tax_ratio * self._discount_incl_tax return self.line_price_excl_tax @property def line_price_incl_tax_incl_discounts(self): # We use whichever discount value is set. 
If the discount value was # calculated against the tax-exclusive prices, then the line price # including tax return self.line_price_incl_tax - self.discount_value @property def line_tax(self): return self.quantity * self.unit_tax @property def line_price_incl_tax(self): return self.quantity * self.unit_price_incl_tax @property def description(self): d = str(self.product) ops = [] for attribute in self.attributes.all(): ops.append("%s = '%s'" % (attribute.option.name, attribute.value)) if ops: d = "%s (%s)" % (d.decode('utf-8'), ", ".join(ops)) return d def get_warning(self): """ Return a warning message about this basket line if one is applicable This could be things like the price has changed """ if not self.stockrecord: msg = u"'%(product)s' is no longer available" return _(msg) % {'product': self.product.get_title()} if not self.price_incl_tax: return if not self.stockinfo.price.is_tax_known: return # Compare current price to price when added to basket current_price_incl_tax = self.stockinfo.price.incl_tax if current_price_incl_tax > self.price_incl_tax: msg = ("The price of '%(product)s' has increased from " "%(old_price)s to %(new_price)s since you added it " "to your basket") return _(msg) % { 'product': self.product.get_title(), 'old_price': currency(self.price_incl_tax), 'new_price': currency(current_price_incl_tax)} if current_price_incl_tax < self.price_incl_tax: msg = ("The price of '%(product)s' has decreased from " "%(old_price)s to %(new_price)s since you added it " "to your basket") return _(msg) % { 'product': self.product.get_title(), 'old_price': currency(self.price_incl_tax), 'new_price': currency(current_price_incl_tax)} class AbstractLineAttribute(models.Model): """ An attribute of a basket line """ line = models.ForeignKey('basket.Line', related_name='attributes', verbose_name=_("Line")) option = models.ForeignKey('catalogue.Option', verbose_name=_("Option")) value = models.CharField(_("Value"), max_length=255) class Meta: abstract = True verbose_name = _('Line attribute') verbose_name_plural = _('Line attributes')
bsd-3-clause
7,346,142,169,669,723,000
33.067857
89
0.587029
false
trezor/python-trezor
trezorlib/tests/device_tests/test_msg_verifymessage.py
1
8823
# This file is part of the Trezor project. # # Copyright (C) 2012-2018 SatoshiLabs and contributors # # This library is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License version 3 # as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the License along with this library. # If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>. import base64 from trezorlib import btc from .common import TrezorTest class TestMsgVerifymessage(TrezorTest): def test_message_long(self): self.setup_mnemonic_nopin_nopassphrase() ret = btc.verify_message( self.client, "Bitcoin", "14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e", bytes.fromhex( "205ff795c29aef7538f8b3bdb2e8add0d0722ad630a140b6aefd504a5a895cbd867cbb00981afc50edd0398211e8d7c304bb8efa461181bc0afa67ea4a720a89ed" ), "VeryLongMessage!" * 64, ) assert ret is True def test_message_testnet(self): self.setup_mnemonic_nopin_nopassphrase() ret = btc.verify_message( self.client, "Testnet", "mirio8q3gtv7fhdnmb3TpZ4EuafdzSs7zL", bytes.fromhex( "209e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80" ), "This is an example of a signed message.", ) assert ret is True def test_message_grs(self): self.setup_mnemonic_allallall() ret = btc.verify_message( self.client, "Groestlcoin", "Fj62rBJi8LvbmWu2jzkaUX1NFXLEqDLoZM", base64.b64decode( "INOYaa/jj8Yxz3mD5k+bZfUmjkjB9VzoV4dNG7+RsBUyK30xL7I9yMgWWVvsL46C5yQtxtZY0cRRk7q9N6b+YTM=" ), "test", ) assert ret is True def test_message_verify(self): self.setup_mnemonic_nopin_nopassphrase() # uncompressed pubkey - OK res = btc.verify_message( self.client, "Bitcoin", "1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T", bytes.fromhex( "1ba77e01a9e17ba158b962cfef5f13dfed676ffc2b4bada24e58f784458b52b97421470d001d53d5880cf5e10e76f02be3e80bf21e18398cbd41e8c3b4af74c8c2" ), "This is an example of a signed message.", ) assert res is True # uncompressed pubkey - FAIL - wrong sig res = btc.verify_message( self.client, "Bitcoin", "1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T", bytes.fromhex( "1ba77e01a9e17ba158b962cfef5f13dfed676ffc2b4bada24e58f784458b52b97421470d001d53d5880cf5e10e76f02be3e80bf21e18398cbd41e8c3b4af74c800" ), "This is an example of a signed message.", ) assert res is False # uncompressed pubkey - FAIL - wrong msg res = btc.verify_message( self.client, "Bitcoin", "1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T", bytes.fromhex( "1ba77e01a9e17ba158b962cfef5f13dfed676ffc2b4bada24e58f784458b52b97421470d001d53d5880cf5e10e76f02be3e80bf21e18398cbd41e8c3b4af74c8c2" ), "This is an example of a signed message!", ) assert res is False # compressed pubkey - OK res = btc.verify_message( self.client, "Bitcoin", "1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8", bytes.fromhex( "1f44e3e461f7ca9f57c472ce1a28214df1de1dadefb6551a32d1907b80c74d5a1fbfd6daaba12dd8cb06699ce3f6941fbe0f3957b5802d13076181046e741eaaaf" ), "This is an example of a signed message.", ) assert res is True # compressed pubkey - FAIL - wrong sig res = btc.verify_message( self.client, "Bitcoin", "1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8", bytes.fromhex( "1f44e3e461f7ca9f57c472ce1a28214df1de1dadefb6551a32d1907b80c74d5a1fbfd6daaba12dd8cb06699ce3f6941fbe0f3957b5802d13076181046e741eaa00" ), "This is an example of a signed message.", 
) assert res is False # compressed pubkey - FAIL - wrong msg res = btc.verify_message( self.client, "Bitcoin", "1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8", bytes.fromhex( "1f44e3e461f7ca9f57c472ce1a28214df1de1dadefb6551a32d1907b80c74d5a1fbfd6daaba12dd8cb06699ce3f6941fbe0f3957b5802d13076181046e741eaaaf" ), "This is an example of a signed message!", ) assert res is False # trezor pubkey - OK res = btc.verify_message( self.client, "Bitcoin", "14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e", bytes.fromhex( "209e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80" ), "This is an example of a signed message.", ) assert res is True # trezor pubkey - FAIL - wrong sig res = btc.verify_message( self.client, "Bitcoin", "14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e", bytes.fromhex( "209e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be00" ), "This is an example of a signed message.", ) assert res is False # trezor pubkey - FAIL - wrong msg res = btc.verify_message( self.client, "Bitcoin", "14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e", bytes.fromhex( "209e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80" ), "This is an example of a signed message!", ) assert res is False def test_message_verify_bcash(self): self.setup_mnemonic_nopin_nopassphrase() res = btc.verify_message( self.client, "Bcash", "bitcoincash:qqj22md58nm09vpwsw82fyletkxkq36zxyxh322pru", bytes.fromhex( "209e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80" ), "This is an example of a signed message.", ) assert res is True def test_verify_bitcoind(self): self.setup_mnemonic_nopin_nopassphrase() res = btc.verify_message( self.client, "Bitcoin", "1KzXE97kV7DrpxCViCN3HbGbiKhzzPM7TQ", bytes.fromhex( "1cc694f0f23901dfe3603789142f36a3fc582d0d5c0ec7215cf2ccd641e4e37228504f3d4dc3eea28bbdbf5da27c49d4635c097004d9f228750ccd836a8e1460c0" ), u"\u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy", ) assert res is True def test_verify_utf(self): self.setup_mnemonic_nopin_nopassphrase() words_nfkd = u"Pr\u030ci\u0301s\u030cerne\u030c z\u030clut\u030couc\u030cky\u0301 ku\u030an\u030c u\u0301pe\u030cl d\u030ca\u0301belske\u0301 o\u0301dy za\u0301ker\u030cny\u0301 uc\u030cen\u030c be\u030cz\u030ci\u0301 pode\u0301l zo\u0301ny u\u0301lu\u030a" words_nfc = u"P\u0159\xed\u0161ern\u011b \u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy z\xe1ke\u0159n\xfd u\u010de\u0148 b\u011b\u017e\xed pod\xe9l z\xf3ny \xfal\u016f" res_nfkd = btc.verify_message( self.client, "Bitcoin", "14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e", bytes.fromhex( "20d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6" ), words_nfkd, ) res_nfc = btc.verify_message( self.client, "Bitcoin", "14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e", bytes.fromhex( "20d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6" ), words_nfc, ) assert res_nfkd is True assert res_nfc is True
lgpl-3.0
-2,778,977,320,289,823,000
37.36087
265
0.643092
false
chrislit/abydos
abydos/distance/_clement.py
1
4649
# Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.

"""abydos.distance._clement.

Clement similarity
"""

from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union

from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer

__all__ = ['Clement']


class Clement(_TokenDistance):
    r"""Clement similarity.

    For two sets X and Y and a population N, Clement similarity
    :cite:`Clement:1976` is defined as

        .. math::

            sim_{Clement}(X, Y) =
            \frac{|X \cap Y|}{|X|}\Big(1-\frac{|X|}{|N|}\Big) +
            \frac{|(N \setminus X) \setminus Y|}{|N \setminus X|}
            \Big(1-\frac{|N \setminus X|}{|N|}\Big)

    In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
    this is

        .. math::

            sim_{Clement} =
            \frac{a}{a+b}\Big(1 - \frac{a+b}{n}\Big) +
            \frac{d}{c+d}\Big(1 - \frac{c+d}{n}\Big)

    .. versionadded:: 0.4.0
    """

    def __init__(
        self,
        alphabet: Optional[
            Union[TCounter[str], Sequence[str], Set[str], int]
        ] = None,
        tokenizer: Optional[_Tokenizer] = None,
        intersection_type: str = 'crisp',
        **kwargs: Any
    ) -> None:
        """Initialize Clement instance.

        Parameters
        ----------
        alphabet : Counter, collection, int, or None
            This represents the alphabet of possible tokens.
            See :ref:`alphabet <alphabet>` description in
            :py:class:`_TokenDistance` for details.
        tokenizer : _Tokenizer
            A tokenizer instance from the :py:mod:`abydos.tokenizer` package
        intersection_type : str
            Specifies the intersection type, and set type as a result:
            See :ref:`intersection_type <intersection_type>` description in
            :py:class:`_TokenDistance` for details.
        **kwargs
            Arbitrary keyword arguments

        Other Parameters
        ----------------
        qval : int
            The length of each q-gram. Using this parameter and
            tokenizer=None will cause the instance to use the QGram
            tokenizer with this q value.
        metric : _Distance
            A string distance measure class for use in the ``soft`` and
            ``fuzzy`` variants.
        threshold : float
            A threshold value, similarities above which are counted as
            members of the intersection for the ``fuzzy`` variant.


        .. versionadded:: 0.4.0

        """
        super(Clement, self).__init__(
            alphabet=alphabet,
            tokenizer=tokenizer,
            intersection_type=intersection_type,
            **kwargs
        )

    def sim(self, src: str, tar: str) -> float:
        """Return the Clement similarity of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Clement similarity

        Examples
        --------
        >>> cmp = Clement()
        >>> cmp.sim('cat', 'hat')
        0.5025379382522239
        >>> cmp.sim('Niall', 'Neil')
        0.33840586363079933
        >>> cmp.sim('aluminum', 'Catalan')
        0.12119877280918714
        >>> cmp.sim('ATCG', 'TAGC')
        0.006336616803332366


        .. versionadded:: 0.4.0

        """
        if src == tar:
            return 1.0

        self._tokenize(src, tar)

        a = self._intersection_card()
        b = self._src_only_card()
        c = self._tar_only_card()
        d = self._total_complement_card()
        n = self._population_unique_card()

        score = 0.0
        if a + b:
            score += (a / (a + b)) * (1 - (a + b) / n)
        if c + d:
            score += (d / (c + d)) * (1 - (c + d) / n)

        return score


if __name__ == '__main__':
    import doctest

    doctest.testmod()
gpl-3.0
-6,004,276,056,573,090,000
28.238994
78
0.562056
false
maxzheng/remoteconfig
test/test_remoteconfig.py
1
1183
import time

import httpretty

from remoteconfig import config, RemoteConfig


class TestRemoteConfig(object):

    @httpretty.activate
    def test_read(self):
        config_url = 'http://test-remoteconfig.com/config.ini'
        config_content = '[section]\n\nkey = value\n'
        httpretty.register_uri(httpretty.GET, config_url, body=config_content)

        config.read(config_url)
        assert config_content == str(config)

        # Cache duration on read
        updated_config_content = '[section]\n\nkey = updated\n'
        httpretty.register_uri(httpretty.GET, config_url, body=updated_config_content)

        config.read(config_url, cache_duration=1)
        assert config_content == str(config)

        # Cache duration on init
        config2 = RemoteConfig(config_url, cache_duration=1, kv_sep=': ')
        assert config_content.replace(' = ', ': ') == str(config2)

        # Using default cache duration
        config2.read(config_url)
        assert config_content.replace(' = ', ': ') == str(config2)

        time.sleep(1)

        # Should update after cache duration
        config.read(config_url)
        assert updated_config_content == str(config)
mit
5,379,055,401,091,969,000
31.861111
86
0.64497
false
5agado/intro-ai
src/test/basicTest.py
1
2831
from neural_network.perceptron import Perceptron
from neural_network.neural_net import NeuralNet
from util import utils
import os
from bitstring import BitArray
import random
from genetic_algorithm.population import Population

def testPerceptron():
    p = Perceptron(2)
    #p.useStepFunction = True
    p.t_sessions = 2000
    t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(),
                                                'logic_gates/NAND.txt'))
    p.learn(t_model)

    print(p.getOutput([0, 0]))
    print(p.getOutput([0, 1]))
    print(p.getOutput([1, 0]))
    print(p.getOutput([1, 1]))

def testNeuralNet():
    net = NeuralNet(2, 2, 1)
    net.t_sessions = 20000
    t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(),
                                                'logic_gates/XOR.txt'))
    net.learn(t_model)

    print(net.getOutputs([0, 0]))
    print(net.getOutputs([0, 1]))
    print(net.getOutputs([1, 0]))
    print(net.getOutputs([1, 1]))

def numberRecognition():
    net = NeuralNet(15, 2, 1)
    t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(),
                                                'number_grids.txt'))
    net.learn(t_model)

    print(net.getOutputs([0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0]))
    print(net.getOutputs([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1]))
    print(net.getOutputs([1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1]))
    print(net.getOutputs([1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1]))
    print(net.getOutputs([1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1]))
    print(net.getOutputs([1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0]))
    print(net.getOutputs([1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))

def initPopulation(p):
    for i in range(len(p.chromos)):
        for j in range(p.chromoSize):
            p.chromos[i].genes[j] = (
                BitArray(int=random.randint(-200000, 200000), length=21))

def evolve(p, net, t_model):
    for _ in range(300):
        for chromo in p.chromos:
            genes = [(a.int)/100000.0 for a in chromo.genes]
            #print(genes)
            net.setWeights(genes)
            perf = net.getError(t_model)
            chromo.fitness = 1/perf
            #print(perf)
        #print(p.getTotalFitness())
        p.newGeneration()

def testNeuralNetWithGA():
    net = NeuralNet(2, 2, 1)
    t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(),
                                                'logic_gates/NAND.txt'))

    Population.initPopulation = initPopulation
    Population.evolve = evolve
    p = Population(70, 9)
    p.initPopulation()
    p.evolve(net, t_model)

    print(net.getOutputs([0, 0]))
    print(net.getOutputs([0, 1]))
    print(net.getOutputs([1, 0]))
    print(net.getOutputs([1, 1]))
    print(net.getError(t_model))

#testPerceptron()
#testNeuralNet()
#testNeuralNetWithGA()
#numberRecognition()
apache-2.0
345,943,458,821,668,500
33.860759
98
0.590957
false
thethythy/Mnemopwd
mnemopwd/client/corelayer/protocol/StateS33R.py
1
2965
# -*- coding: utf-8 -*-

# Copyright (c) 2016, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
State S33 : Deletion
"""

from ...util.funcutils import singleton
from .StateSCC import StateSCC


@singleton
class StateS33R(StateSCC):
    """State S33 : Deletion"""

    def do(self, handler, data):
        """Action of the state S33R: user account deletion request"""
        with handler.lock:
            try:
                # Challenge creation
                echallenge = self.compute_challenge(handler, b"S33.7")
                if echallenge:
                    # Encrypt login
                    elogin = handler.ephecc.encrypt(
                        handler.login, pubkey=handler.ephecc.get_pubkey())

                    # Compute then encrypt id
                    id = self.compute_client_id(handler.ms, handler.login)
                    eid = handler.ephecc.encrypt(
                        id, pubkey=handler.ephecc.get_pubkey())

                    # Send deletion request
                    msg = echallenge + b';DELETION;' + eid + b';' + elogin
                    handler.loop.call_soon_threadsafe(handler.transport.write, msg)

                    # Notify the handler a property has changed
                    handler.loop.run_in_executor(
                        None, handler.notify,
                        "connection.state", "User account deletion request")

            except Exception as exc:
                # Schedule a call to the exception handler
                handler.loop.call_soon_threadsafe(handler.exception_handler, exc)

            else:
                handler.state = handler.states['33A']  # Next state
bsd-2-clause
3,113,484,126,458,670,000
40.760563
83
0.651939
false
jlane9/selenium_data_attributes
setup.py
1
1363
"""setup.py

..codeauthor:: John Lane <[email protected]>

"""

from setuptools import setup, find_packages
from sda import __author__, __email__, __license__, __version__

setup(
    name='sda',
    version=__version__,
    packages=find_packages(),
    scripts=[],
    description='A wrapper for Selenium. This library uses custom data attributes to accelerate '
                'testing through the Selenium framework',
    author=__author__,
    author_email=__email__,
    url='https://github.com/jlane9/selenium-data-attributes',
    download_url='https://github.com/jlane9/selenium-data-attributes/tarball/{}'.format(__version__),
    keywords='testing selenium qa web automation',
    install_requires=['lxml', 'cssselect'],
    license=__license__,
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Developers',
                 'Programming Language :: Python :: 2.6',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3.2',
                 'Programming Language :: Python :: 3.5',
                 'Programming Language :: Python :: 3.6',
                 'License :: OSI Approved :: MIT License',
                 'Topic :: Software Development :: Quality Assurance',
                 'Topic :: Software Development :: Testing'])
mit
7,108,862,239,312,840,000
39.088235
101
0.598679
false
maxfierke/dotcom-flask
app.py
1
6299
import datetime, os, socket

from flask import abort, Flask, json, jsonify, redirect, request, render_template, Response, send_from_directory, url_for
from flask.ext.admin import Admin
from flask.ext.mongoengine import MongoEngine
from flask.ext.mongorest import MongoRest
from flask.ext.mongorest.views import ResourceView
from flask.ext.mongorest.resources import Resource
from flask.ext.mongorest import operators as ops
from flask.ext.mongorest import methods
from flask.ext.admin.form import rules
from flask.ext.admin.contrib.mongoengine import ModelView
from PIL import Image

app = Flask(__name__)

if socket.gethostname() == 'local-dev.maxfierke.com':
    app.config.from_object('config.DevConfig')
else:
    app.config.from_object('config.ProdConfig')

db = MongoEngine(app)
api = MongoRest(app)

class Project(db.Document):
    project_id = db.StringField(max_length=255, unique=True)
    name = db.StringField(max_length=60, required=True)
    status = db.ReferenceField('ProjectStatus')
    start_date = db.DateTimeField(required=True)
    end_date = db.DateTimeField()
    short_description = db.StringField(max_length=140, required=True)
    description = db.StringField(max_length=5000, required=True)
    image = db.ReferenceField('ProjectImage')
    github = db.StringField(max_length=200)
    links = db.ListField(db.ReferenceField('ProjectLink'))
    categories = db.ListField(db.ReferenceField('ProjectTag'))
    created_at = db.DateTimeField(default=datetime.datetime.now)
    updated_at = db.DateTimeField(default=datetime.datetime.now)

    def __unicode__(self):
        return self.name

class ProjectImage(db.Document):
    project = db.ReferenceField('Project', required=True)
    image = db.ImageField(thumbnail_size=(400, 400, True), required=True)
    created_at = db.DateTimeField(default=datetime.datetime.now)
    updated_at = db.DateTimeField(default=datetime.datetime.now)

    def __unicode__(self):
        return self.project.name

class ProjectLink(db.Document):
    title = db.StringField(max_length=140, required=True)
    url = db.URLField(required=True)
    created_at = db.DateTimeField(default=datetime.datetime.now)
    updated_at = db.DateTimeField(default=datetime.datetime.now)

    def __unicode__(self):
        return self.title

class ProjectStatus(db.Document):
    name = db.StringField(max_length=40, required=True)
    label = db.StringField(max_length=10, required=True)
    created_at = db.DateTimeField(default=datetime.datetime.now)
    updated_at = db.DateTimeField(default=datetime.datetime.now)

    def __unicode__(self):
        return self.name

class ProjectTag(db.Document):
    name = db.StringField(max_length=25, required=True)
    slug = db.StringField(max_length=25, required=True)
    created_at = db.DateTimeField(default=datetime.datetime.now)
    updated_at = db.DateTimeField(default=datetime.datetime.now)

    def __unicode__(self):
        return self.name

class ProjectLinkResource(Resource):
    document = ProjectLink

class ProjectStatusResource(Resource):
    document = ProjectStatus

class ProjectTagResource(Resource):
    document = ProjectTag

class ProjectResource(Resource):
    document = Project
    related_resources = {
        'categories': ProjectTagResource,
        'links': ProjectLinkResource,
        'status': ProjectStatusResource
    }
    filters = {
        'project_id': [ops.Exact],
    }

    def get_object(self, pk, qfilter=None):
        qs = self.get_queryset()
        if qfilter:
            qs = qfilter(qs)
        return qs.get(project_id=pk)

    def get_objects(self, **kwargs):
        qs = super(ProjectResource, self).get_objects(**kwargs)
        return qs.order_by('+end_date', '-start_date', '+status__name')

# Routed Functions

## HTML Site Functions

@app.route('/favicon.ico')
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')

@app.route('/')
def page_about():
    return render_template('about.html', title='About')

@app.route('/about/')
def redirect_about():
    return redirect(url_for('page_about'), code=301)

@app.route('/projects/')
def page_projects():
    projects = Project.objects.order_by('+end_date', '-start_date', '+status__name').all()
    project_tags = ProjectTag.objects.order_by('name').all()
    return render_template('projects.html', title='Projects', projects=projects, project_tags=project_tags)

## API Functions

@api.register(name='projects', url='/api/project/')
class ProjectView(ResourceView):
    resource = ProjectResource
    methods = [methods.Fetch, methods.List]

@app.route('/api/project/<project_id>/image', methods=['GET'])
def api_project_image(project_id):
    project = Project.objects.get_or_404(project_id=project_id)
    proj_image = project.image.image.read()
    return Response(proj_image, mimetype=project.image.image.content_type)

@app.route('/api/project/<project_id>/thumbnail', methods=['GET'])
def api_project_thumb(project_id):
    project = Project.objects.get_or_404(project_id=project_id)
    proj_thumb = project.image.image.thumbnail.read()
    return Response(proj_thumb, mimetype=project.image.image.content_type)

@app.route('/api', methods=['GET'])
def api():
    return render_template('api.html', title='API')

## Error Handlers

@app.errorhandler(404)
def page_not_found(error):
    return render_template('page_not_found.html', status_code=404, message='Not Found'), 404

@app.errorhandler(500)
def server_error(error):
    return render_template('errors.html', status_code=500, message='Internal Server Error'), 500

# Main hook
if __name__ == '__main__':
    admin = Admin(app, name="MaxFierke.com")
    admin.add_view(ModelView(Project, name='Projects', endpoint='project', category='Projects'))
    admin.add_view(ModelView(ProjectImage, name='Project Images', endpoint='project_image', category='Projects'))
    admin.add_view(ModelView(ProjectLink, name='Project Links', endpoint='project_link', category='Projects'))
    admin.add_view(ModelView(ProjectStatus, name='Project Statuses', endpoint='project_status', category='Projects'))
    admin.add_view(ModelView(ProjectTag, name='Project Tags', endpoint='project_tag', category='Projects'))
    app.run(debug=True)
mit
5,754,946,228,814,816,000
35.622093
121
0.711542
false
AstroPrint/AstroBox
src/octoprint/timelapse.py
1
12285
# coding=utf-8 __author__ = "Gina Häußge <[email protected]>" __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' import logging import os import threading import urllib import time import subprocess import fnmatch import datetime import sys import shutil import octoprint.util as util from octoprint.settings import settings from octoprint.events import eventManager, Events import sarge # currently configured timelapse current = None def getFinishedTimelapses(): files = [] basedir = settings().getBaseFolder("timelapse") for osFile in os.listdir(basedir): if not fnmatch.fnmatch(osFile, "*.mpg"): continue statResult = os.stat(os.path.join(basedir, osFile)) files.append({ "name": osFile, "size": util.getFormattedSize(statResult.st_size), "bytes": statResult.st_size, "date": util.getFormattedDateTime(datetime.datetime.fromtimestamp(statResult.st_ctime)) }) return files validTimelapseTypes = ["off", "timed", "zchange"] updateCallbacks = [] def registerCallback(callback): if not callback in updateCallbacks: updateCallbacks.append(callback) def unregisterCallback(callback): if callback in updateCallbacks: updateCallbacks.remove(callback) def notifyCallbacks(timelapse): if timelapse is None: config = None else: config = timelapse.configData() for callback in updateCallbacks: try: callback.sendTimelapseConfig(config) except: pass def configureTimelapse(config=None, persist=False): global current if config is None: config = settings().get(["webcam", "timelapse"]) if current is not None: current.unload() type = config["type"] postRoll = 0 if "postRoll" in config: postRoll = config["postRoll"] if type is None or "off" == type: current = None elif "zchange" == type: current = ZTimelapse(postRoll=postRoll) elif "timed" == type: interval = 10 if "options" in config and "interval" in config["options"]: interval = config["options"]["interval"] current = TimedTimelapse(postRoll=postRoll, interval=interval) notifyCallbacks(current) if persist: settings().set(["webcam", "timelapse"], config) settings().save() class Timelapse(object): def __init__(self, postRoll=0): self._logger = logging.getLogger(__name__) self._imageNumber = None self._inTimelapse = False self._gcodeFile = None self._postRoll = postRoll self._postRollStart = None self._onPostRollDone = None self._captureDir = settings().getBaseFolder("timelapse_tmp") self._movieDir = settings().getBaseFolder("timelapse") self._snapshotUrl = settings().get(["webcam", "snapshot"]) self._fps = 25 self._renderThread = None self._captureMutex = threading.Lock() # subscribe events eventManager().subscribe(Events.PRINT_STARTED, self.onPrintStarted) eventManager().subscribe(Events.PRINT_FAILED, self.onPrintDone) eventManager().subscribe(Events.PRINT_DONE, self.onPrintDone) eventManager().subscribe(Events.PRINT_RESUMED, self.onPrintResumed) for (event, callback) in self.eventSubscriptions(): eventManager().subscribe(event, callback) def postRoll(self): return self._postRoll def unload(self): if self._inTimelapse: self.stopTimelapse(doCreateMovie=False) # unsubscribe events eventManager().unsubscribe(Events.PRINT_STARTED, self.onPrintStarted) eventManager().unsubscribe(Events.PRINT_FAILED, self.onPrintDone) eventManager().unsubscribe(Events.PRINT_DONE, self.onPrintDone) eventManager().unsubscribe(Events.PRINT_RESUMED, self.onPrintResumed) for (event, callback) in self.eventSubscriptions(): eventManager().unsubscribe(event, callback) def onPrintStarted(self, event, payload): """ Override this to perform additional actions upon start 
of a print job. """ self.startTimelapse(payload["file"]['printFileName']) def onPrintDone(self, event, payload): """ Override this to perform additional actions upon the stop of a print job. """ self.stopTimelapse(success=(event==Events.PRINT_DONE)) def onPrintResumed(self, event, payload): """ Override this to perform additional actions upon the pausing of a print job. """ if not self._inTimelapse: self.startTimelapse(payload["file"]) def eventSubscriptions(self): """ Override this method to subscribe to additional events by returning an array of (event, callback) tuples. Events that are already subscribed: * PrintStarted - self.onPrintStarted * PrintResumed - self.onPrintResumed * PrintFailed - self.onPrintDone * PrintDone - self.onPrintDone """ return [] def configData(self): """ Override this method to return the current timelapse configuration data. The data should have the following form: type: "<type of timelapse>", options: { <additional options> } """ return None def startTimelapse(self, gcodeFile): self._logger.debug("Starting timelapse for %s" % gcodeFile) self.cleanCaptureDir() self._imageNumber = 0 self._inTimelapse = True self._gcodeFile = os.path.basename(gcodeFile) def stopTimelapse(self, doCreateMovie=True, success=True): self._logger.debug("Stopping timelapse") self._inTimelapse = False def resetImageNumber(): self._imageNumber = None def createMovie(): self._renderThread = threading.Thread(target=self._createMovie, kwargs={"success": success}) self._renderThread.daemon = True self._renderThread.start() def resetAndCreate(): resetImageNumber() createMovie() if self._postRoll > 0: self._postRollStart = time.time() if doCreateMovie: self._onPostRollDone = resetAndCreate else: self._onPostRollDone = resetImageNumber self.processPostRoll() else: self._postRollStart = None if doCreateMovie: resetAndCreate() else: resetImageNumber() def processPostRoll(self): pass def captureImage(self): if self._captureDir is None: self._logger.warn("Cannot capture image, capture directory is unset") return with self._captureMutex: filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % (self._imageNumber)) self._imageNumber += 1 self._logger.debug("Capturing image to %s" % filename) captureThread = threading.Thread(target=self._captureWorker, kwargs={"filename": filename}) captureThread.daemon = True captureThread.start() return filename def _captureWorker(self, filename): eventManager().fire(Events.CAPTURE_START, {"file": filename}) try: urllib.urlretrieve(self._snapshotUrl, filename) self._logger.debug("Image %s captured from %s" % (filename, self._snapshotUrl)) except: self._logger.exception("Could not capture image %s from %s, decreasing image counter again" % (filename, self._snapshotUrl)) if self._imageNumber is not None and self._imageNumber > 0: self._imageNumber -= 1 eventManager().fire(Events.CAPTURE_DONE, {"file": filename}) def _createMovie(self, success=True): ffmpeg = settings().get(["webcam", "ffmpeg"]) bitrate = settings().get(["webcam", "bitrate"]) if ffmpeg is None or bitrate is None: self._logger.warn("Cannot create movie, path to ffmpeg or desired bitrate is unset") return input = os.path.join(self._captureDir, "tmp_%05d.jpg") if success: output = os.path.join(self._movieDir, "%s_%s.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S"))) else: output = os.path.join(self._movieDir, "%s_%s-failed.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S"))) # prepare ffmpeg command command = [ ffmpeg, '-loglevel', 'error', '-i', 
input, '-vcodec', 'mpeg2video', '-pix_fmt', 'yuv420p', '-r', str(self._fps), '-y', '-b:v', bitrate, '-f', 'vob'] filters = [] # flip video if configured if settings().getBoolean(["webcam", "flipH"]): filters.append('hflip') if settings().getBoolean(["webcam", "flipV"]): filters.append('vflip') # add watermark if configured watermarkFilter = None if settings().getBoolean(["webcam", "watermark"]): watermark = os.path.join(os.path.dirname(__file__), "static", "img", "watermark.png") if sys.platform == "win32": # Because ffmpeg hiccups on windows' drive letters and backslashes we have to give the watermark # path a special treatment. Yeah, I couldn't believe it either... watermark = watermark.replace("\\", "/").replace(":", "\\\\:") watermarkFilter = "movie=%s [wm]; [%%(inputName)s][wm] overlay=10:main_h-overlay_h-10" % watermark filterstring = None if len(filters) > 0: if watermarkFilter is not None: filterstring = "[in] %s [postprocessed]; %s [out]" % (",".join(filters), watermarkFilter % {"inputName": "postprocessed"}) else: filterstring = "[in] %s [out]" % ",".join(filters) elif watermarkFilter is not None: filterstring = watermarkFilter % {"inputName": "in"} + " [out]" if filterstring is not None: self._logger.debug("Applying videofilter chain: %s" % filterstring) command.extend(["-vf", sarge.shell_quote(filterstring)]) # finalize command with output file self._logger.debug("Rendering movie to %s" % output) command.append(output) eventManager().fire(Events.MOVIE_RENDERING, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output)}) command_str = " ".join(command) self._logger.debug("Executing command: %s" % command_str) p = sarge.run(command_str, stderr=sarge.Capture()) if p.returncode == 0: eventManager().fire(Events.MOVIE_DONE, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output)}) else: returncode = p.returncode stderr_text = p.stderr.text self._logger.warn("Could not render movie, got return code %r: %s" % (returncode, stderr_text)) eventManager().fire(Events.MOVIE_FAILED, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output), "returncode": returncode, "error": stderr_text}) def cleanCaptureDir(self): if not os.path.isdir(self._captureDir): self._logger.warn("Cannot clean capture directory, it is unset") return for filename in os.listdir(self._captureDir): if not fnmatch.fnmatch(filename, "*.jpg"): continue os.remove(os.path.join(self._captureDir, filename)) class ZTimelapse(Timelapse): def __init__(self, postRoll=0): Timelapse.__init__(self, postRoll=postRoll) self._logger.debug("ZTimelapse initialized") def eventSubscriptions(self): return [ ("ZChange", self._onZChange) ] def configData(self): return { "type": "zchange" } def processPostRoll(self): Timelapse.processPostRoll(self) filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % self._imageNumber) self._imageNumber += 1 with self._captureMutex: self._captureWorker(filename) for i in range(self._postRoll * self._fps): newFile = os.path.join(self._captureDir, "tmp_%05d.jpg" % (self._imageNumber)) self._imageNumber += 1 shutil.copyfile(filename, newFile) if self._onPostRollDone is not None: self._onPostRollDone() def _onZChange(self, event, payload): self.captureImage() class TimedTimelapse(Timelapse): def __init__(self, postRoll=0, interval=1): Timelapse.__init__(self, postRoll=postRoll) self._interval = interval if self._interval < 1: self._interval = 1 # force minimum interval of 1s self._timerThread = None 
self._logger.debug("TimedTimelapse initialized") def interval(self): return self._interval def configData(self): return { "type": "timed", "options": { "interval": self._interval } } def onPrintStarted(self, event, payload): Timelapse.onPrintStarted(self, event, payload) if self._timerThread is not None: return self._timerThread = threading.Thread(target=self._timerWorker) self._timerThread.daemon = True self._timerThread.start() def onPrintDone(self, event, payload): Timelapse.onPrintDone(self, event, payload) self._timerThread = None def _timerWorker(self): self._logger.debug("Starting timer for interval based timelapse") while self._inTimelapse or (self._postRollStart and time.time() - self._postRollStart <= self._postRoll * self._fps): self.captureImage() time.sleep(self._interval) if self._postRollStart is not None and self._onPostRollDone is not None: self._onPostRollDone() self._postRollStart = None
agpl-3.0
7,949,570,891,765,914,000
29.403465
180
0.707726
false
davidwhogg/HoneyComb
superpgram/code/header_time.py
1
1992
import numpy as np import pyfits import glob import matplotlib.pyplot as plt def real_footprint(t): """ # `real_footprint` Takes real Kepler (BJD) time values for a certain target and returns estimates of the starts, stops and centres """ dt = 0.02043359821692 # interval between observations (days) stops = t starts = t - dt centres = t - .5*dt return starts, stops, centres def real_footprint_sc(t): """ # `real_footprint` Takes real Kepler (BJD) time values for a certain target and returns estimates of the starts, stops and centres """ dt = 6.81119940564e-4 # interval between observations (days) (sc) stops = t starts = t - dt centres = t - .5*dt return starts, stops, centres def bjd2utc(t): """ # `bjd2utc` Takes real Kepler (BJD) time values for a certain target. Returns the spacecraft-UTC times (in days). A is an array of coefficients for a sinusoid + linear trend, fit to the timing data of 491 asteroseismic targets that are randomly distributed on the CCD. """ A = np.genfromtxt("A.txt").T w = 2*np.pi/372.5 # angular frequency (days-1) return t + A[0]*np.sin(w*t) + A[1]*np.cos(w*t) + A[2]*t + A[3] if __name__ == "__main__": # Test on a real target D = "/Users/angusr/angusr/data2" kid = "7341231" # a kepler target with lc and sc data chosen at `random' fnames = [] qs = range(17) x = [] for q in qs: fnames.append(glob.glob("%s/Q%s_public/kplr%s-*_llc.fits" % (D, q, kid.zfill(9)))[0]) # load test fits file for fname in fnames: hdulist = pyfits.open(fname) tbdata = hdulist[1].data x.extend(tbdata["TIME"]) # convert BJDs to UTCs x = np.array(x) + 2454833 # days utc = bjd2utc(x) # plot correction plt.clf() plt.plot(x, utc, "k.") plt.xlabel("BJD") plt.ylabel("BJD-UTC (days)") plt.savefig("demonstrate")
mit
-7,558,977,120,474,102,000
25.918919
77
0.603916
false
LRGH/amoco
amoco/arch/v850/env.py
1
3017
# -*- coding: utf-8 -*- # This code is part of Amoco # Copyright (C) 2018 Axel Tillequin ([email protected]) # published under GPLv2 license # import expressions: from amoco.cas.expressions import * # registers : # ----------- # main reg set: R = [reg("r%d" % r, 32) for r in range(32)] with is_reg_flags: PSW = reg("psw", 32) # program-status word Z = slc(PSW, 0, 1, "z") # Zero S = slc(PSW, 1, 1, "s") # Sign OV = slc(PSW, 2, 1, "ov") # Overlfow CY = slc(PSW, 3, 1, "cy") # Carry SAT = slc(PSW, 4, 1, "sat") # Saturation ID = slc(PSW, 5, 1, "id") # EI exception (TRAP) EP = slc(PSW, 6, 1, "ep") # exception type (0: interrupt, 1:other) NP = slc(PSW, 7, 1, "np") # FE exception IMP = slc( PSW, 16, 1, "imp" ) # instruction memory protection (0: trusted, 1: not trusted) DMP = slc(PSW, 17, 1, "dmp") # data memory protection (0: trusted, 1: not trusted) NPV = slc(PSW, 18, 1, "npv") # non-protected value (0: trusted, 1: not trusted) with is_reg_pc: pc = reg("pc", 16) with is_reg_stack: sp = reg("sp", 32) # stack ptr R[0] = cst(0, 32).to_sym("zero") R[3] = sp R[4] = reg("gp", 32) # global variable ptr R[5] = reg("tp", 32) # text area ptr R[30] = reg("ep", 32) # array/struct base ptr R[31] = reg("lp", 32) # link ptr # system registers: EIPC = reg("eipc", 32) EIPSW = reg("eipsw", 32) FEPC = reg("fepc", 32) FEPSW = reg("fepsw", 32) ECR = reg("ecr", 32) # exception cause SCCFG = reg("sccfg", 32) # SYSCAL op setting SCBP = reg("scbp", 32) # SYSCAL base ptr EIIC = reg("eiic", 32) FEIC = reg("feic", 32) DBIC = reg("dbic", 32) CTPC = reg("ctpc", 32) CTPSW = reg("ctpsw", 32) DBPC = reg("dbpc", 32) DBPSW = reg("dbpsw", 32) CTBP = reg("ctbp", 32) # CALLT base ptr EIWR = reg("eiwr", 32) FEWR = reg("fewr", 32) DBWR = reg("dbwr", 32) BSEL = reg("bsel", 32) # register bank select BNK = slc(BSEL, 0, 8, "bnk") GRP = slc(BSEL, 8, 8, "grp") CONDITION_V = 0b0000 # == CONDITION_NV = 0b1000 # != CONDITION_C = 0b0001 # >= (unsigned) CONDITION_NC = 0b1001 # < (unsigned) CONDITION_Z = 0b0010 # <0 CONDITION_NZ = 0b1010 # <0 CONDITION_NH = 0b0011 # <0 CONDITION_H = 0b1011 # <0 CONDITION_S = 0b0100 # <0 CONDITION_NS = 0b1100 # <0 CONDITION_T = 0b0101 # <0 CONDITION_SA = 0b1101 # <0 CONDITION_LT = 0b0110 # <0 CONDITION_GE = 0b1110 # <0 CONDITION_LE = 0b0111 # <0 CONDITION_GT = 0b1111 # <0 CONDITION = { CONDITION_V: ("v", OV == 1), CONDITION_NV: ("nv", OV == 0), CONDITION_C: ("c", CY == 1), CONDITION_NC: ("nc", CY == 0), CONDITION_Z: ("z", Z == 1), CONDITION_NZ: ("nz", Z == 0), CONDITION_NH: ("nh", (CY | Z) == 1), CONDITION_H: ("h", (CY | Z) == 0), CONDITION_S: ("neg", S == 1), CONDITION_NS: ("pos", S == 0), CONDITION_T: ("", bit1), CONDITION_SA: ("sat", SAT == 1), CONDITION_LT: ("lt", (S ^ OV) == 1), CONDITION_GE: ("ge", (S ^ OV) == 0), CONDITION_LE: ("le", ((S ^ OV) | Z) == 1), CONDITION_GT: ("gt", ((S ^ OV) | Z) == 0), }
gpl-2.0
-6,677,247,967,779,939,000
27.462264
87
0.551541
false
ychenracing/Spiders
onesixnine/onesixnine/middlewares.py
1
1908
# -*- coding: utf-8 -*- # Define here the models for your spider middleware # # See documentation in: # http://doc.scrapy.org/en/latest/topics/spider-middleware.html from scrapy import signals class OnesixnineSpiderMiddleware(object): # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the spider middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_spider_input(self, response, spider): # Called for each response that goes through the spider # middleware and into the spider. # Should return None or raise an exception. return None def process_spider_output(self, response, result, spider): # Called with the results returned from the Spider, after # it has processed the response. # Must return an iterable of Request, dict or Item objects. for i in result: yield i def process_spider_exception(self, response, exception, spider): # Called when a spider or process_spider_input() method # (from other spider middleware) raises an exception. # Should return either None or an iterable of Response, dict # or Item objects. pass def process_start_requests(self, start_requests, spider): # Called with the start requests of the spider, and works # similarly to the process_spider_output() method, except # that it doesn’t have a response associated. # Must return only requests (not items). for r in start_requests: yield r def spider_opened(self, spider): spider.logger.info('Spider opened: %s' % spider.name)
apache-2.0
-8,819,020,306,805,128,000
33.035714
78
0.664218
false
arthurdk/gk-analysis
GKVisualizer.py
1
7581
import plotly from plotly.graph_objs import Scatter, Layout, Bar, Figure from wordcloud import WordCloud import matplotlib.pyplot as plt import plotly.graph_objs as go ''' class VisualizationStrategy: def __init__(self): pass # Homemade enumeration Plot, CSV, ASCII = range(3) ''' import random class GKVisualizer: def __init__(self, reviewers_filtering, group_by_option='nothing', rating_filters=[], word_cloud_background="white", color_scheme=None): self.reviewers_filtering = reviewers_filtering self.group_by = group_by_option self.rating_filters = rating_filters self.word_cloud_background = word_cloud_background self.word_cloud_color_scheme = color_scheme @staticmethod def _grey_color_func(word, font_size, position, orientation, random_state=None, **kwargs): """ Credit to word_cloud project on github :param word: :param font_size: :param position: :param orientation: :param random_state: :param kwargs: :return: """ return "hsl(0, 0%%, %d%%)" % random.randint(60, 100) def word_cloud(self, frequencies, mask=None): if mask is not None: word_cloud = WordCloud(background_color=self.word_cloud_background, width=1200, height=1000, mask=mask ).generate_from_frequencies(frequencies) else: word_cloud = WordCloud(background_color=self.word_cloud_background, width=1200, height=1000 ).generate_from_frequencies(frequencies) if self.word_cloud_color_scheme is not None: plt.imshow(word_cloud.recolor(color_func=GKVisualizer._grey_color_func, random_state=3)) else: plt.imshow(word_cloud) plt.axis('off') plt.show() @staticmethod def display_gauge(labels, target, title): value = 100.0 / len(labels) values = [value] * len(labels) base_chart = { "values": values, "domain": {"x": [0, .48]}, "marker": { "line": { "width": 1 } }, "name": "Gauge", "hole": .4, "type": "pie", "direction": "clockwise", "showlegend": False, "hoverinfo": "none", "textinfo": "none", "textposition": "outside" } meter_chart = { "values": values, "labels": labels, 'textfont': { "size": 22, "color": "white" }, "domain": {"x": [0, 0.48]}, "name": "Gauge", "hole": .3, "type": "pie", "direction": "clockwise", "showlegend": False, "textinfo": "label", "textposition": "inside", "hoverinfo": "none" } layout = { 'title': title, 'xaxis': { 'showticklabels': False, 'autotick': False, 'showgrid': False, 'zeroline': False, }, 'yaxis': { 'showticklabels': False, 'autotick': False, 'showgrid': False, 'zeroline': False, }, 'annotations': [ { 'xref': 'paper', 'yref': 'paper', 'x': 0.23, 'y': 0.5, 'text': target, 'font': { "size": 22, "color": "black" }, 'showarrow': False } ] } # apparently we don't want the boundary now base_chart['marker']['line']['width'] = 0 fig = {"data": [base_chart, meter_chart], "layout": layout} plotly.offline.plot(fig) @staticmethod def _determine_min_max(reviews, min_date, max_date): for review in reviews: if min_date > review.date: min_date = review.date if max_date < review.date: max_date = review.date return min_date, max_date def determine_date(self, reviews): if self.group_by != 'nothing': max_date = reviews[0][0].date min_date = reviews[0][0].date for group in reviews: min_date, max_date = self._determine_min_max(reviews=group, min_date=min_date, max_date=max_date) else: max_date = reviews[0].date min_date = reviews[0].date min_date, max_date = self._determine_min_max(reviews=reviews, min_date=min_date, max_date=max_date) return min_date, max_date # TODO optimized to not call this one everytime def get_dated_title(self, title, grouped_reviews): """ Return the title with a proper date :param title: :param grouped_reviews: 
:return: """ min_date, max_date = self.determine_date(grouped_reviews) if min_date.year == max_date.year: title += " (%d)" % max_date.year else: title += " (%d to %d)" % (min_date.year, max_date.year) return title @staticmethod def get_named_title(title, reviewers): if len(reviewers) > 0: title += " (" + ", ".join(reviewers) + ") " return title def get_rating_filtered_title(self, title): for opt, rating in self.rating_filters: title += " (" + opt + " " + str(rating) + ")" return title def group_plot(self, data, labels, title, ylabel): figure = { "data": [ Bar(x=labels, y=data) ], "layout": Layout( title=title, xaxis=dict( title=self.group_by ), yaxis=dict( title=ylabel ), ) } plotly.offline.plot(figure) def double_group_plot(self, gk_grouped_reviews, y, ylabel, labels, title): traces = [] for idx in range(len(labels)): traces.append(go.Bar( x=gk_grouped_reviews.labels, y=y[:, idx], # value for the second level label name=labels[idx] # second level label )) layout = go.Layout( title=title, barmode='group', xaxis=dict( title=self.group_by ), yaxis=dict( title=ylabel ), ) fig = go.Figure(data=traces, layout=layout) plotly.offline.plot(fig) @staticmethod def scatter(x, y, title, ylabel): layout = dict(title=title, yaxis=dict( title=ylabel) , xaxis=dict( title="Date") ) # Create a trace trace = go.Scatter( x=x, y=y, mode='markers' ) data = [trace] fig = dict(data=data, layout=layout) plotly.offline.plot(fig)
mit
-1,554,547,537,877,476,000
28.729412
113
0.460361
false
edx/ecommerce
ecommerce/extensions/analytics/tests/test_analytics.py
1
1737
# -*- coding: utf-8 -*- """Unit tests for the analytics app.""" from django.apps import apps from django.test import override_settings from oscar.core.loading import get_model from ecommerce.tests.mixins import BasketCreationMixin from ecommerce.tests.testcases import TestCase ProductRecord = get_model('analytics', 'ProductRecord') class AnalyticsTests(BasketCreationMixin, TestCase): """Test analytics behavior in controlled scenarios.""" @override_settings(INSTALL_DEFAULT_ANALYTICS_RECEIVERS=False) def test_order_receiver_disabled(self): """Verify that Oscar's Analytics order receiver can be disabled.""" self._initialize() self.assert_successful_basket_creation(skus=[self.FREE_SKU], checkout=True) # Verify that no product records are kept self.assertFalse(ProductRecord.objects.all().exists()) @override_settings(INSTALL_DEFAULT_ANALYTICS_RECEIVERS=True) def test_order_receiver_enabled(self): """Verify that Oscar's Analytics order receiver can be re-enabled.""" self._initialize() self.assert_successful_basket_creation(skus=[self.FREE_SKU], checkout=True) # Verify that product order counts are recorded product = ProductRecord.objects.get(product=self.free_product) self.assertEqual(product.num_purchases, 1) def _initialize(self): """Execute initialization tasks for the analytics app.""" # Django executes app config during startup for every management command. # As a result, the `ready` method is only called once, before Django knows # it's running tests. As a workaround, we explicitly call the `ready` method. apps.get_app_config('analytics').ready()
agpl-3.0
4,935,367,456,486,433,000
38.477273
85
0.715026
false
HuStmpHrrr/gjms
clickonce/publish.py
1
3780
from __future__ import print_function import subprocess import os import sys import shutil import datetime import distutils.dir_util if sys.version_info < (3,): input = raw_input str = unicode pwd = os.getcwd() appver_file = r'.\AppVer' target_shares = { 'release': [], 'test' : [], 'dev' : [] } # it needs this transformation because msbuild does a direct string concatenation instead of a path join. def unify_path(p): if isinstance(p, str): p = p if p.endswith(os.path.sep) else p+os.path.sep return (p, p) else: return p target_shares = {k: [unify_path(p) for p in v] for k, v in target_shares.items()} output_dir = r'bin\publish' publish_dir = r'bin\publishapp.publish' msbuild_folder = r'%ProgramFiles%\MSBuild\12.0\bin\amd64' \ if os.path.exists(r'%s\MSBuild\12.0\bin\amd64' % os.environ['PROGRAMFILES'])\ else r'%ProgramFiles(x86)%\MSBuild\12.0\bin\amd64' def get_appver(): if not os.path.exists(appver_file): with open(appver_file) as fd: fd.write('1.0.0.0') return '1.0.0.0' with open(appver_file) as fd: return fd.readline().strip() def incr_appver(ver): vers = ver.split('.') vers[-1] = str(int(vers[-1]) + 1) return '.'.join(vers) def set_appver(ver): with open(appver_file, 'w') as fd: fd.write(ver) def get_cmd(target, updateurl, ver, env): template = r'"{0}\msbuild" /t:clean;publish /property:OutputPath={1},PublishUrl={2},InstallUrl={2},UpdateUrl={3},ApplicationVersion={4},MinimumRequiredVersion={4},AssemblyName="{5}"' cmd = template.format(msbuild_folder, output_dir, target, updateurl, ver, 'NST System Configurator '+env) return cmd if __name__=='__main__': error = {} print('current python implementation version is', sys.version) print('currently working in: {}'.format(pwd)) print('please make sure this script runs directly under the project folder.') env = input('build environment({}): '.format(', '.join(sorted(target_shares.keys())))) while env not in target_shares: print("nonexisting environment: {}".format(env), file=sys.stderr) env = input('build environment({}): '.format(', '.join(sorted(target_shares.keys())))) ver = incr_appver(get_appver()) for i, p in enumerate(target_shares[env]): target, updateutl = p cmd = get_cmd(target, updateurl, ver, env+str(i)) print('executing {}'.format(cmd)) print('----------------------------------') proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) with proc.stdout: for l in proc.stdout: print(l.strip().decode('utf-8')) proc.terminate() print('----------------------------------') if proc.returncode == 0: try: distutils.dir_util.copy_tree(publish_dir, target) except Exception as e: error[target] = e print('error occurred: {}'.format(str(e)), file=sys.stderr) distutils.dir_util.copy_tree(publish_dir, r'bin\backup' + '\\' + str(i)) else: print("error: {}".format(proc.returncode), file=sys.stderr) print if len(error) != 0: print('Error occurred:', file=sys.stderr) for k, e in error.items(): print('{}: {}'.format(k, str(e)), file=sys.stderr) print('has backed up the folder.', file=sys.stderr) try: set_appver(ver) except IOError as e: print("failed to write to file: {}".format(str(e)), file=sys.stderr) print('next application version will be {}.'.format(incr_appver(ver)), file=sys.stderr) input('press enter to continue...')
lgpl-2.1
2,146,401,426,755,421,700
34.327103
186
0.589153
false
0x6768/fqdn
setup.py
1
1144
from setuptools import setup setup( name='fqdn', packages=['fqdn'], version='1.1.0', description=('Validate fully-qualified domain names compliant ' 'to RFC 1035 and the preferred form in RFC 3686 s. 2.'), author='Guy Hughes', author_email='[email protected]', url='https://github.com/guyhughes/fqdn', keywords=['fqdn', 'domain', 'hostname', 'RFC3686', 'dns'], license='MPL 2.0', zip_safe=True, install_requires=['cached-property>=1.3.0'], test_suite='tests', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: System :: Systems Administration', 'Topic :: Utilities', ] )
mpl-2.0
-8,930,818,900,220,796,000
37.133333
74
0.59965
false
piappl/robrex_mapping
img_tools/scripts/trajectory_capture.py
1
1683
#!/usr/bin/env python ## # @file trajectory_capture.py # @author Artur Wilkowski <[email protected]> # # @section LICENSE # # Copyright (C) 2015, Industrial Research Institute for Automation and Measurements # Security and Defence Systems Division <http://www.piap.pl> import roslib roslib.load_manifest('img_tools') import sys import rospy from nav_msgs.msg import Path import yaml import os class trajectory_capture: def pathcallback(self, data): self.mapper_path = data def savepath(self, filename): f = open(filename, 'w') for pose in self.mapper_path.poses: f.write(str(pose.header.seq) + ' ' + str(pose.pose.position.x) + ' ' + str(pose.pose.position.y) + ' ' + str(pose.pose.position.z) + ' ' + \ str(pose.pose.orientation.x) + ' ' + str(pose.pose.orientation.y) + ' ' + str(pose.pose.orientation.z) + ' ' + str(pose.pose.orientation.w) + '\n') #print pose.header.seq #print pose.pose.position.x #print pose.pose.position.y #print pose.pose.position.z #print pose.pose.orientation.x #print pose.pose.orientation.y #print pose.pose.orientation.z #print pose.pose.orientation.w f.close() def __init__(self): self.mapper_path_sub = rospy.Subscriber('mapper_path', Path, self.pathcallback) self.mapper_path = None def main(args): rospy.init_node('trajectory_capture', anonymous=True) ic = trajectory_capture() rospy.spin() print 'Saving ' + 'odompath.txt' + ' on exit.' ic.savepath('odompath.txt') if __name__ == '__main__': main(sys.argv)
gpl-2.0
3,916,604,659,894,667,000
30.166667
167
0.619727
false
Kane610/axis
axis/streammanager.py
1
3280
"""Python library to enable Axis devices to integrate with Home Assistant.""" import asyncio import logging from typing import Callable, List, Optional from .configuration import Configuration from .rtsp import SIGNAL_DATA, SIGNAL_FAILED, SIGNAL_PLAYING, STATE_STOPPED, RTSPClient _LOGGER = logging.getLogger(__name__) RTSP_URL = ( "rtsp://{host}/axis-media/media.amp" "?video={video}&audio={audio}&event={event}" ) RETRY_TIMER = 15 class StreamManager: """Setup, start, stop and retry stream.""" def __init__(self, config: Configuration) -> None: """Initialize stream manager.""" self.config = config self.video = None # Unsupported self.audio = None # Unsupported self.event = None self.stream: Optional[RTSPClient] = None self.connection_status_callback: List[Callable] = [] @property def stream_url(self) -> str: """Build url for stream.""" rtsp_url = RTSP_URL.format( host=self.config.host, video=self.video_query, audio=self.audio_query, event=self.event_query, ) _LOGGER.debug(rtsp_url) return rtsp_url @property def video_query(self) -> int: """Generate video query, not supported.""" return 0 @property def audio_query(self) -> int: """Generate audio query, not supported.""" return 0 @property def event_query(self) -> str: """Generate event query.""" return "on" if bool(self.event) else "off" def session_callback(self, signal: str) -> None: """Signalling from stream session. Data - new data available for processing. Playing - Connection is healthy. Retry - if there is no connection to device. """ if signal == SIGNAL_DATA and self.event: self.event(self.data) elif signal == SIGNAL_FAILED: self.retry() if signal in [SIGNAL_PLAYING, SIGNAL_FAILED]: for callback in self.connection_status_callback: callback(signal) @property def data(self) -> str: """Get stream data.""" return self.stream.rtp.data # type: ignore[union-attr] @property def state(self) -> str: """State of stream.""" if not self.stream: return STATE_STOPPED return self.stream.session.state def start(self) -> None: """Start stream.""" if not self.stream or self.stream.session.state == STATE_STOPPED: self.stream = RTSPClient( self.stream_url, self.config.host, self.config.username, self.config.password, self.session_callback, ) asyncio.create_task(self.stream.start()) def stop(self) -> None: """Stop stream.""" if self.stream and self.stream.session.state != STATE_STOPPED: self.stream.stop() def retry(self) -> None: """No connection to device, retry connection after 15 seconds.""" loop = asyncio.get_running_loop() self.stream = None loop.call_later(RETRY_TIMER, self.start) _LOGGER.debug("Reconnecting to %s", self.config.host)
mit
-2,756,108,687,357,434,400
28.818182
87
0.588415
false
dit/dit
dit/inference/counts.py
1
6393
""" Non-cython methods for getting counts and distributions from data. """ import numpy as np __all__ = ( 'counts_from_data', 'distribution_from_data', 'get_counts', ) try: # cython from .pycounts import counts_from_data, distribution_from_data except ImportError: # no cython from boltons.iterutils import windowed_iter from collections import Counter, defaultdict from itertools import product from .. import modify_outcomes from ..exceptions import ditException def counts_from_data(data, hLength, fLength, marginals=True, alphabet=None, standardize=True): """ Returns conditional counts from `data`. To obtain counts for joint distribution only, use fLength=0. Parameters ---------- data : NumPy array The data used to calculate morphs. Note: `data` cannot be a generator. Also, if standardize is True, then data can be any indexable iterable, such as a list or tuple. hLength : int The maxmimum history word length used to calculate morphs. fLength : int The length of future words that defines the morph. marginals : bool If True, then the morphs for all histories words from L=0 to L=hLength are calculated. If False, only histories of length L=hLength are calculated. alphabet : list The alphabet to use when creating the morphs. If `None`, then one is obtained from `data`. If not `None`, then the provided alphabet supplements what appears in the data. So the data is always scanned through in order to get the proper alphabet. standardize : bool The algorithm requires that the symbols in data be standardized to a canonical alphabet consisting of integers from 0 to k-1, where k is the alphabet size. If `data` is already standard, then an extra pass through the data can be avoided by setting `standardize` to `False`, but note: if `standardize` is False, then data MUST be a NumPy array. Returns ------- histories : list A list of observed histories, corresponding to the rows in `cCounts`. cCounts : NumPy array A NumPy array representing conditional counts. The rows correspond to the observed histories, so this is sparse. The number of rows in this array cannot be known in advance, but the number of columns will be equal to the alphabet size raised to the `fLength` power. hCounts : NumPy array A 1D array representing the count of each history word. alphabet : tuple The ordered tuple representing the alphabet of the data. If `None`, the one is created from the data. Notes ----- This requires three complete passes through the data. One to obtain the full alphabet. Another to standardize the data. A final pass to obtain the counts. This is implemented densely. So during the course of the algorithm, we work with a large array containing a row for each possible history. Only the rows corresponding to observed histories are returned. 
""" try: data = list(map(tuple, data)) except TypeError: pass counts = Counter(windowed_iter(data, hLength + fLength)) cond_counts = defaultdict(lambda: defaultdict(int)) for word, count in counts.items(): cond_counts[word[:hLength]][word[hLength:]] += count histories = sorted(counts.keys()) alphabet = set(alphabet) if alphabet is not None else set() alphabet = tuple(sorted(alphabet.union(*[set(hist) for hist in histories]))) cCounts = np.empty((len(histories), len(alphabet)**fLength)) for i, hist in enumerate(histories): for j, future in enumerate(product(alphabet, repeat=fLength)): cCounts[i, j] = cond_counts[hist][future] hCounts = cCounts.sum(axis=1) return histories, cCounts, hCounts, alphabet def distribution_from_data(d, L, trim=True, base=None): """ Returns a distribution over words of length `L` from `d`. The returned distribution is the naive estimate of the distribution, which assigns probabilities equal to the number of times a particular word appeared in the data divided by the total number of times a word could have appeared in the data. Roughly, it corresponds to the stationary distribution of a maximum likelihood estimate of the transition matrix of an (L-1)th order Markov chain. Parameters ---------- d : list A list of symbols to be converted into a distribution. L : integer The length of the words for the distribution. trim : bool If true, then words with zero probability are trimmed from the distribution. base : int or string The desired base of the returned distribution. If `None`, then the value of `dit.ditParams['base']` is used. """ from dit import ditParams, Distribution try: d = list(map(tuple, d)) except TypeError: pass if base is None: base = ditParams['base'] words, _, counts, _ = counts_from_data(d, L, 0) # We turn the counts to probabilities pmf = counts / counts.sum() dist = Distribution(words, pmf, trim=trim) dist.set_base(base) if L == 1: try: dist = modify_outcomes(dist, lambda o: o[0]) except ditException: pass return dist def get_counts(data, length): """ Count the occurrences of all words of `length` in `data`. Parameters ---------- data : iterable The sequence of samples length : int The length to group samples into. Returns ------- counts : np.array Array with the count values. """ hists, _, counts, _ = counts_from_data(data, length, 0) mask = np.array([len(h) == length for h in hists]) counts = counts[mask] return counts
bsd-3-clause
-8,556,384,628,051,415,000
33.556757
98
0.613796
false
alex-am/pyalgo
pyalgo/play/sum.py
1
1135
# Given a list of positive integers S = [s_i] and a positive integer t # find if t can be written as the sum of a subset of elements of S import numpy as np def is_sum(t, S): # is it np hard ? # O(n**2 t) # brute force 2**n S = list(filter(lambda x:x>0, S)) n = len(S) m = np.zeros((n, t), dtype=np.int) # first line for s in range(0, t): m[0, s] = s + 1 #we can be lucky for j in range(1, n): for i in range(1, t): for s in S: if i < s: continue if m[j-1, i-s]: m[j, i] = s if i == (t-1): #reached the target we are done return (j, m) break return (j, m) def get_shortest(i, m): _, t = m.shape res = [] while i >= 0: e = m[i, t-1] res.append(e) t = t - e if t <= 0: break i -= 1 return res if __name__ == "__main__": t = 12 S = [3 ,4, 4, 4, 3, 12, 45] i, m = is_sum(t, S) print(i, m) print(get_shortest(i, m))
gpl-3.0
438,430,288,067,461,700
22.666667
70
0.414978
false
thomas-hori/Repuge-NG
ludicrous/ScrollingInterface.py
1
2538
from ludicrous.SimpleInterface import SimpleInterface __copying__=""" Written by Thomas Hori This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.""" class ScrollingInterface(SimpleInterface): """A subclass of SimpleInterface adding simple panning.""" def get_offsets(self): """Used for LOS optimisation and get_viewport_grids.""" x = y = 0 if self.playerobj.pt: x, y = self.playerobj.pt width, height = self.display.get_dimensions() if width < 0: width = 80 if height < 0: height = 23 width -= 1 height -= 4 offsetx = x-(width//2) roffsetx = offsetx+width offsety = y-(height//2) roffsety = offsety+height return width, height, offsetx, offsety, roffsetx, roffsety def get_viewport_grids(self): if not self.level: return SimpleInterface.get_viewport_grids(self) width, height, offsetx, offsety, roffsetx, roffsety = self.get_offsets() levwidth = len(self.level.grid) levheight = len(self.level.grid[0]) colno = offsetx coords = [] grid_subset = [] objgrid_subset = [] for unused in range(width): if (colno >= 0) and (colno < levwidth): gcol = self.level.grid[colno] ocol = self.level.objgrid[colno] c_sub = [] g_sub = [] o_sub = [] rowno = offsety for unused2 in range(height): c_sub.append((colno, rowno)) if (colno < 0) or (colno >= levwidth) or (rowno < 0) or (rowno >= levheight): g_sub.append(("space", None)) o_sub.append([]) else: g_sub.append(gcol[rowno]) o_sub.append(ocol[rowno]) rowno += 1 coords.append(c_sub) grid_subset.append(g_sub) objgrid_subset.append(o_sub) colno += 1 return coords, grid_subset, objgrid_subset def get_viewport_pt(self): width, height = self.display.get_dimensions() if width < 0: width = 80 if height < 0: height = 23 width -= 1 height -= 4 return (width)//2, (height)//2
mpl-2.0
6,258,991,873,130,474,000
34.782609
93
0.520489
false
felixsch/mkcrowbar
tests/test_network.py
1
5238
from pytest import raises from mkcrowbar import network from fake import * def test_iface_has_ipv4_addr(capsys, monkeypatch): local = LocalCommands() monkeypatch.setattr('mkcrowbar.network.local', local) args = ['-f', 'inet', 'addr', 'show', 'eth0'] local.has('ip', expect_args(args, load_fixture('ip_addr_show'))) ip = network.iface_has_ipv4_addr('eth0') assert ip == '11.22.33.44' def test_iface_backup_configuration(monkeypatch): iface = 'eth0' home_path = '/root/.mkcrowbar' iface_path = '/etc/sysconfig/network/ifcfg-' + iface monkeypatch.setattr('os.path.exists', LocalCommand('path.exists', expect_args([home_path], True))) monkeypatch.setattr('os.makedirs', LocalCommand('makedirs', expect_args([home_path]))) monkeypatch.setattr('os.path.isfile', LocalCommand('path.isfile', expect_args([iface_path]))) monkeypatch.setattr('os.rename', LocalCommand('rename', expect_args([iface_path]))) network.iface_backup_configuration(iface) def test_set_static_addr(monkeypatch): iface = 'eth0' path = '/etc/sysconfig/network/ifcfg-' + iface stub_file = StubOpen() monkeypatch.setattr('mkcrowbar.network.iface_backup_configuration', LocalCommand('backup', expect_args([iface]))) monkeypatch.setattr('builtins.open', LocalCommand('open', expect_args([path], lambda *args: stub_file))) network.iface_set_static_addr(iface, {'FOO': 'bar'}) assert 'DEVICE=eth0\n' in stub_file.result() assert 'BOOTPROTO=static\n' in stub_file.result() assert 'FOO=bar\n' in stub_file.result() def test_start_stop(monkeypatch): iface = 'eth1' local = LocalCommands() monkeypatch.setattr('mkcrowbar.network.local', local) local.has('ifdown', expect_args([iface], return_ok())) assert network.iface_stop(iface) is True local.has('ifdown', expect_args([iface], return_error())) assert network.iface_stop(iface) is False local.has('ifup', expect_args([iface], return_ok())) assert network.iface_start(iface) is True local.has('ifup', expect_args([iface], return_error())) assert network.iface_start(iface) is False def test_uses_dhcp(monkeypatch): iface = 'eth1' path = '/etc/sysconfig/network/ifcfg-' + iface dhcp = StubOpen(monkeypatch, expect_args([path], load_fixture('ifcfg-dhcp'))) static = StubOpen(monkeypatch, expect_args([path], load_fixture('ifcfg-static'))) monkeypatch.setattr('builtins.open', lambda *args: dhcp(args)) assert network.iface_uses_dhcp(iface) is True monkeypatch.setattr('builtins.open', lambda *args: static(args)) assert network.iface_uses_dhcp(iface) is False def test_hostname(capsys, monkeypatch): local = LocalCommands() monkeypatch.setattr('mkcrowbar.network.local', local) local.has('hostname', expect_args(['-f'], return_ok(' test.testme.com '))) assert network.hostname('-f') == 'test.testme.com' local.has('hostname', expect_args(['-f'], return_error('hostname: Name or service not known'))) with raises(SystemExit): network.hostname('-f') _, err = capsys.readouterr() assert err == 'hostname: Name or service not known' def test_set_hostname(monkeypatch): local = LocalCommands() monkeypatch.setattr('mkcrowbar.network.local', local) local.has('hostname', expect_args(['newhostname'], return_ok())) assert network.set_hostname('newhostname') is True def test_is_qualified_hostname(monkeypatch): assert network.is_qualified_hostname('host') is False assert network.is_qualified_hostname('[email protected]') is False assert network.is_qualified_hostname('local.suse.com') is True assert network.is_qualified_hostname('superlocal.local.suse.com') is True def test_add_to_hosts(monkeypatch): fqdn = 'example.test.com' ip = '192.168.2.111' path = 
'/etc/hosts' clean_hosts = StubOpen(monkeypatch, expect_args([path], load_fixture('hosts'))) added_hosts = StubOpen(monkeypatch, expect_args([path], load_fixture('hosts-already-added'))) monkeypatch.setattr('builtins.open', lambda *args: clean_hosts(args)) assert network.add_to_hosts(ip, fqdn) is 0 assert '192.168.2.111 example.test.com example\n' in clean_hosts.result() monkeypatch.setattr('builtins.open', lambda *args: added_hosts(args)) assert network.add_to_hosts(ip, fqdn) is -1 def test_has_running_firewall(monkeypatch): local = LocalCommands() monkeypatch.setattr('mkcrowbar.network.local', local) local.has('iptables', expect_args(['-S'], "-P INPUT ACCEPT\n-P FORWARD ACCEPT\n-P OUTPUT ACCEPT")) assert network.has_running_firewall() is False local.has('iptables', expect_args(['-S'], load_fixture('used_iptables'))) assert network.has_running_firewall() is True def test_is_domain_name_reachable(monkeypatch): local = LocalCommands() monkeypatch.setattr('mkcrowbar.network.local', local) local.has('ping', expect_args(['-c','1', 'fooo.net'], return_ok())) assert network.is_domain_name_reachable('fooo.net') is True local.has('ping', expect_args(['-c','1', 'fooooooo.net'], return_error())) assert network.is_domain_name_reachable('fooooooo.net') is False
apache-2.0
-4,987,254,012,026,377,000
34.391892
109
0.685567
false
kamladi/textback-web
twilio/rest/resources/__init__.py
1
1752
import twilio from twilio import TwilioException, TwilioRestException from twilio.rest.resources.imports import ( parse_qs, json, httplib2 ) from twilio.rest.resources.util import ( transform_params, format_name, parse_date, convert_boolean, convert_case, convert_keys, normalize_dates, UNSET_TIMEOUT ) from twilio.rest.resources.base import ( Response, Resource, InstanceResource, ListResource, make_request, make_twilio_request ) from twilio.rest.resources.phone_numbers import ( AvailablePhoneNumber, AvailablePhoneNumbers, PhoneNumber, PhoneNumbers ) from twilio.rest.resources.recordings import Recording, Recordings from twilio.rest.resources.transcriptions import Transcription, Transcriptions from twilio.rest.resources.notifications import Notification, Notifications from twilio.rest.resources.connect_apps import ( ConnectApp, ConnectApps, AuthorizedConnectApp, AuthorizedConnectApps ) from twilio.rest.resources.calls import Call, Calls from twilio.rest.resources.caller_ids import CallerIds, CallerId from twilio.rest.resources.sandboxes import Sandbox, Sandboxes from twilio.rest.resources.sms_messages import ( Sms, SmsMessage, SmsMessages, ShortCode, ShortCodes) from twilio.rest.resources.conferences import ( Participant, Participants, Conference, Conferences ) from twilio.rest.resources.queues import ( Member, Members, Queue, Queues, ) from twilio.rest.resources.applications import ( Application, Applications ) from twilio.rest.resources.accounts import Account, Accounts from twilio.rest.resources.usage import Usage from twilio.rest.resources.messages import Message, Messages from twilio.rest.resources.media import Media, MediaList from twilio.rest.resources.sip import Sip
mit
-1,907,901,667,675,662,600
35.5
78
0.811073
false
xhochy/g-octave
g_octave/description_tree.py
1
3391
# -*- coding: utf-8 -*- """ g_octave.description_tree ~~~~~~~~~~~~~~~~~~~~~~~~~ This module implements a Python object with the content of a directory tree with DESCRIPTION files. The object contains *g_octave.Description* objects for each DESCRIPTION file. :copyright: (c) 2009-2010 by Rafael Goncalves Martins :license: GPL-2, see LICENSE for more details. """ from __future__ import absolute_import __all__ = ['DescriptionTree'] import glob import os import re from .config import Config from .description import Description from .log import Log from portage.versions import vercmp log = Log('g_octave.description_tree') config = Config() # from http://wiki.python.org/moin/HowTo/Sorting/ def cmp_to_key(mycmp): 'Convert a cmp= function into a key= function' class K(object): def __init__(self, obj, *args): self.obj = obj def __lt__(self, other): return mycmp(self.obj, other.obj) < 0 def __gt__(self, other): return mycmp(self.obj, other.obj) > 0 def __eq__(self, other): return mycmp(self.obj, other.obj) == 0 def __le__(self, other): return mycmp(self.obj, other.obj) <= 0 def __ge__(self, other): return mycmp(self.obj, other.obj) >= 0 def __ne__(self, other): return mycmp(self.obj, other.obj) != 0 return K class DescriptionTree(list): def __init__(self, parse_sysreq=True): log.info('Parsing the package database.') list.__init__(self) self._categories = [i.strip() for i in config.categories.split(',')] for my_file in glob.glob(os.path.join(config.db, 'octave-forge', \ '**', '**', '*.DESCRIPTION')): description = Description(my_file, parse_sysreq=parse_sysreq) if description.CAT in self._categories: self.append(description) def package_versions(self, pn): tmp = [] for pkg in self: if pkg.PN == pn: tmp.append(pkg.PV) tmp.sort(key=cmp_to_key(vercmp)) return tmp def latest_version(self, pn): tmp = self.package_versions(pn) return (len(tmp) > 0) and tmp[-1] or None def latest_version_from_list(self, pv_list): tmp = pv_list[:] tmp.sort(key=cmp_to_key(vercmp)) return (len(tmp) > 0) and tmp[-1] or None def search(self, term): # term can be a regular expression re_term = re.compile(r'%s' % term) packages = {} for pkg in self: if re_term.search(pkg.PN) is not None: if pkg.PN not in packages: packages[pkg.PN] = [] packages[pkg.PN].append(pkg.PV) packages[pkg.PN].sort(key=cmp_to_key(vercmp)) return packages def list(self): packages = {} for category in self._categories: packages[category] = {} for pkg in self: if pkg.PN not in packages[pkg.CAT]: packages[pkg.CAT][pkg.PN] = [] packages[pkg.CAT][pkg.PN].append(pkg.PV) packages[pkg.CAT][pkg.PN].sort(key=cmp_to_key(vercmp)) return packages def get(self, p): for pkg in self: if pkg.P == p: return pkg return None
gpl-2.0
-2,548,059,103,229,188,600
29.54955
76
0.555883
false
alphagov/notifications-api
tests/app/db.py
1
42588
import random import uuid from datetime import date, datetime, timedelta import pytest from app import db from app.dao import fact_processing_time_dao from app.dao.email_branding_dao import dao_create_email_branding from app.dao.inbound_sms_dao import dao_create_inbound_sms from app.dao.invited_org_user_dao import save_invited_org_user from app.dao.invited_user_dao import save_invited_user from app.dao.jobs_dao import dao_create_job from app.dao.notifications_dao import dao_create_notification from app.dao.organisation_dao import ( dao_add_service_to_organisation, dao_create_organisation, ) from app.dao.permissions_dao import permission_dao from app.dao.service_callback_api_dao import save_service_callback_api from app.dao.service_data_retention_dao import insert_service_data_retention from app.dao.service_inbound_api_dao import save_service_inbound_api from app.dao.service_permissions_dao import dao_add_service_permission from app.dao.service_sms_sender_dao import ( dao_update_service_sms_sender, update_existing_sms_sender_with_inbound_number, ) from app.dao.services_dao import dao_add_user_to_service, dao_create_service from app.dao.templates_dao import dao_create_template, dao_update_template from app.dao.users_dao import save_model_user from app.models import ( EMAIL_TYPE, KEY_TYPE_NORMAL, LETTER_TYPE, MOBILE_TYPE, SMS_TYPE, AnnualBilling, ApiKey, BroadcastEvent, BroadcastMessage, BroadcastProvider, BroadcastProviderMessage, BroadcastProviderMessageNumber, BroadcastStatusType, Complaint, DailySortedLetter, Domain, EmailBranding, FactBilling, FactNotificationStatus, FactProcessingTime, InboundNumber, InboundSms, InvitedOrganisationUser, InvitedUser, Job, LetterBranding, LetterRate, Notification, NotificationHistory, Organisation, Permission, Rate, ReturnedLetter, Service, ServiceCallbackApi, ServiceContactList, ServiceEmailReplyTo, ServiceGuestList, ServiceInboundApi, ServiceLetterContact, ServicePermission, ServiceSmsSender, Template, TemplateFolder, User, WebauthnCredential, ) def create_user( *, mobile_number="+447700900986", email="[email protected]", state='active', id_=None, name="Test User" ): data = { 'id': id_ or uuid.uuid4(), 'name': name, 'email_address': email, 'password': 'password', 'mobile_number': mobile_number, 'state': state } user = User.query.filter_by(email_address=email).first() if not user: user = User(**data) save_model_user(user, validated_email_access=True) return user def create_permissions(user, service, *permissions): permissions = [ Permission(service_id=service.id, user_id=user.id, permission=p) for p in permissions ] permission_dao.set_user_service_permission(user, service, permissions, _commit=True) def create_service( user=None, service_name="Sample service", service_id=None, restricted=False, count_as_live=True, service_permissions=None, research_mode=False, active=True, email_from=None, prefix_sms=True, message_limit=1000, organisation_type='central', check_if_service_exists=False, go_live_user=None, go_live_at=None, crown=True, organisation=None, purchase_order_number=None, billing_contact_names=None, billing_contact_email_addresses=None, billing_reference=None, ): if check_if_service_exists: service = Service.query.filter_by(name=service_name).first() if (not check_if_service_exists) or (check_if_service_exists and not service): service = Service( name=service_name, message_limit=message_limit, restricted=restricted, email_from=email_from if email_from else service_name.lower().replace(' ', '.'), created_by=user if user else 
create_user(email='{}@digital.cabinet-office.gov.uk'.format(uuid.uuid4())), prefix_sms=prefix_sms, organisation_type=organisation_type, organisation=organisation, go_live_user=go_live_user, go_live_at=go_live_at, crown=crown, purchase_order_number=purchase_order_number, billing_contact_names=billing_contact_names, billing_contact_email_addresses=billing_contact_email_addresses, billing_reference=billing_reference, ) dao_create_service( service, service.created_by, service_id, service_permissions=service_permissions, ) service.active = active service.research_mode = research_mode service.count_as_live = count_as_live else: if user and user not in service.users: dao_add_user_to_service(service, user) return service def create_service_with_inbound_number( inbound_number='1234567', *args, **kwargs ): service = create_service(*args, **kwargs) sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first() inbound = create_inbound_number(number=inbound_number, service_id=service.id) update_existing_sms_sender_with_inbound_number(service_sms_sender=sms_sender, sms_sender=inbound_number, inbound_number_id=inbound.id) return service def create_service_with_defined_sms_sender( sms_sender_value='1234567', *args, **kwargs ): service = create_service(*args, **kwargs) sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first() dao_update_service_sms_sender(service_id=service.id, service_sms_sender_id=sms_sender.id, is_default=True, sms_sender=sms_sender_value) return service def create_template( service, template_type=SMS_TYPE, template_name=None, subject='Template subject', content='Dear Sir/Madam, Hello. Yours Truly, The Government.', reply_to=None, hidden=False, archived=False, folder=None, postage=None, process_type='normal', contact_block_id=None ): data = { 'name': template_name or '{} Template Name'.format(template_type), 'template_type': template_type, 'content': content, 'service': service, 'created_by': service.created_by, 'reply_to': reply_to, 'hidden': hidden, 'folder': folder, 'process_type': process_type, } if template_type == LETTER_TYPE: data["postage"] = postage or "second" if contact_block_id: data['service_letter_contact_id'] = contact_block_id if template_type != SMS_TYPE: data['subject'] = subject template = Template(**data) dao_create_template(template) if archived: template.archived = archived dao_update_template(template) return template def create_notification( template=None, job=None, job_row_number=None, to_field=None, status='created', reference=None, created_at=None, sent_at=None, updated_at=None, billable_units=1, personalisation=None, api_key=None, key_type=KEY_TYPE_NORMAL, sent_by=None, client_reference=None, rate_multiplier=None, international=False, phone_prefix=None, scheduled_for=None, normalised_to=None, one_off=False, reply_to_text=None, created_by_id=None, postage=None, document_download_count=None, ): assert job or template if job: template = job.template if created_at is None: created_at = datetime.utcnow() if to_field is None: to_field = '+447700900855' if template.template_type == SMS_TYPE else '[email protected]' if status not in ('created', 'validation-failed', 'virus-scan-failed', 'pending-virus-check'): sent_at = sent_at or datetime.utcnow() updated_at = updated_at or datetime.utcnow() if not one_off and (job is None and api_key is None): # we did not specify in test - lets create it api_key = ApiKey.query.filter(ApiKey.service == template.service, ApiKey.key_type == key_type).first() if not api_key: api_key = 
create_api_key(template.service, key_type=key_type) if template.template_type == 'letter' and postage is None: postage = 'second' data = { 'id': uuid.uuid4(), 'to': to_field, 'job_id': job and job.id, 'job': job, 'service_id': template.service.id, 'service': template.service, 'template_id': template.id, 'template_version': template.version, 'status': status, 'reference': reference, 'created_at': created_at, 'sent_at': sent_at, 'billable_units': billable_units, 'personalisation': personalisation, 'notification_type': template.template_type, 'api_key': api_key, 'api_key_id': api_key and api_key.id, 'key_type': api_key.key_type if api_key else key_type, 'sent_by': sent_by, 'updated_at': updated_at, 'client_reference': client_reference, 'job_row_number': job_row_number, 'rate_multiplier': rate_multiplier, 'international': international, 'phone_prefix': phone_prefix, 'normalised_to': normalised_to, 'reply_to_text': reply_to_text, 'created_by_id': created_by_id, 'postage': postage, 'document_download_count': document_download_count, } notification = Notification(**data) dao_create_notification(notification) return notification def create_notification_history( template=None, job=None, job_row_number=None, status='created', reference=None, created_at=None, sent_at=None, updated_at=None, billable_units=1, api_key=None, key_type=KEY_TYPE_NORMAL, sent_by=None, client_reference=None, rate_multiplier=None, international=False, phone_prefix=None, created_by_id=None, postage=None, id=None ): assert job or template if job: template = job.template if created_at is None: created_at = datetime.utcnow() if status != 'created': sent_at = sent_at or datetime.utcnow() updated_at = updated_at or datetime.utcnow() if template.template_type == 'letter' and postage is None: postage = 'second' data = { 'id': id or uuid.uuid4(), 'job_id': job and job.id, 'job': job, 'service_id': template.service.id, 'service': template.service, 'template_id': template.id, 'template_version': template.version, 'status': status, 'reference': reference, 'created_at': created_at, 'sent_at': sent_at, 'billable_units': billable_units, 'notification_type': template.template_type, 'api_key': api_key, 'api_key_id': api_key and api_key.id, 'key_type': api_key.key_type if api_key else key_type, 'sent_by': sent_by, 'updated_at': updated_at, 'client_reference': client_reference, 'job_row_number': job_row_number, 'rate_multiplier': rate_multiplier, 'international': international, 'phone_prefix': phone_prefix, 'created_by_id': created_by_id, 'postage': postage } notification_history = NotificationHistory(**data) db.session.add(notification_history) db.session.commit() return notification_history def create_job( template, notification_count=1, created_at=None, job_status='pending', scheduled_for=None, processing_started=None, processing_finished=None, original_file_name='some.csv', archived=False, contact_list_id=None, ): data = { 'id': uuid.uuid4(), 'service_id': template.service_id, 'service': template.service, 'template_id': template.id, 'template_version': template.version, 'original_file_name': original_file_name, 'notification_count': notification_count, 'created_at': created_at or datetime.utcnow(), 'created_by': template.created_by, 'job_status': job_status, 'scheduled_for': scheduled_for, 'processing_started': processing_started, 'processing_finished': processing_finished, 'archived': archived, 'contact_list_id': contact_list_id, } job = Job(**data) dao_create_job(job) return job def create_service_permission(service_id, 
permission=EMAIL_TYPE): dao_add_service_permission( service_id if service_id else create_service().id, permission) service_permissions = ServicePermission.query.all() return service_permissions def create_inbound_sms( service, notify_number=None, user_number='447700900111', provider_date=None, provider_reference=None, content='Hello', provider="mmg", created_at=None ): if not service.inbound_number: create_inbound_number( # create random inbound number notify_number or '07{:09}'.format(random.randint(0, 1e9 - 1)), provider=provider, service_id=service.id ) inbound = InboundSms( service=service, created_at=created_at or datetime.utcnow(), notify_number=service.get_inbound_number(), user_number=user_number, provider_date=provider_date or datetime.utcnow(), provider_reference=provider_reference or 'foo', content=content, provider=provider ) dao_create_inbound_sms(inbound) return inbound def create_service_inbound_api( service, url="https://something.com", bearer_token="some_super_secret", ): service_inbound_api = ServiceInboundApi(service_id=service.id, url=url, bearer_token=bearer_token, updated_by_id=service.users[0].id ) save_service_inbound_api(service_inbound_api) return service_inbound_api def create_service_callback_api( service, url="https://something.com", bearer_token="some_super_secret", callback_type="delivery_status" ): service_callback_api = ServiceCallbackApi(service_id=service.id, url=url, bearer_token=bearer_token, updated_by_id=service.users[0].id, callback_type=callback_type ) save_service_callback_api(service_callback_api) return service_callback_api def create_email_branding(colour='blue', logo='test_x2.png', name='test_org_1', text='DisplayName'): data = { 'colour': colour, 'logo': logo, 'name': name, 'text': text, } email_branding = EmailBranding(**data) dao_create_email_branding(email_branding) return email_branding def create_rate(start_date, value, notification_type): rate = Rate( id=uuid.uuid4(), valid_from=start_date, rate=value, notification_type=notification_type ) db.session.add(rate) db.session.commit() return rate def create_letter_rate(start_date=None, end_date=None, crown=True, sheet_count=1, rate=0.33, post_class='second'): if start_date is None: start_date = datetime(2016, 1, 1) rate = LetterRate( id=uuid.uuid4(), start_date=start_date, end_date=end_date, crown=crown, sheet_count=sheet_count, rate=rate, post_class=post_class ) db.session.add(rate) db.session.commit() return rate def create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name=None): id_ = uuid.uuid4() name = key_name if key_name else '{} api key {}'.format(key_type, id_) api_key = ApiKey( service=service, name=name, created_by=service.created_by, key_type=key_type, id=id_, secret=uuid.uuid4() ) db.session.add(api_key) db.session.commit() return api_key def create_inbound_number(number, provider='mmg', active=True, service_id=None): inbound_number = InboundNumber( id=uuid.uuid4(), number=number, provider=provider, active=active, service_id=service_id ) db.session.add(inbound_number) db.session.commit() return inbound_number def create_reply_to_email( service, email_address, is_default=True, archived=False ): data = { 'service': service, 'email_address': email_address, 'is_default': is_default, 'archived': archived, } reply_to = ServiceEmailReplyTo(**data) db.session.add(reply_to) db.session.commit() return reply_to def create_service_sms_sender( service, sms_sender, is_default=True, inbound_number_id=None, archived=False ): data = { 'service_id': service.id, 'sms_sender': sms_sender, 
'is_default': is_default, 'inbound_number_id': inbound_number_id, 'archived': archived, } service_sms_sender = ServiceSmsSender(**data) db.session.add(service_sms_sender) db.session.commit() return service_sms_sender def create_letter_contact( service, contact_block, is_default=True, archived=False ): data = { 'service': service, 'contact_block': contact_block, 'is_default': is_default, 'archived': archived, } letter_content = ServiceLetterContact(**data) db.session.add(letter_content) db.session.commit() return letter_content def create_annual_billing( service_id, free_sms_fragment_limit, financial_year_start ): annual_billing = AnnualBilling( service_id=service_id, free_sms_fragment_limit=free_sms_fragment_limit, financial_year_start=financial_year_start ) db.session.add(annual_billing) db.session.commit() return annual_billing def create_domain(domain, organisation_id): domain = Domain(domain=domain, organisation_id=organisation_id) db.session.add(domain) db.session.commit() return domain def create_organisation( name='test_org_1', active=True, organisation_type=None, domains=None, organisation_id=None, purchase_order_number=None, billing_contact_names=None, billing_contact_email_addresses=None, billing_reference=None, ): data = { 'id': organisation_id, 'name': name, 'active': active, 'organisation_type': organisation_type, 'purchase_order_number': purchase_order_number, 'billing_contact_names': billing_contact_names, 'billing_contact_email_addresses': billing_contact_email_addresses, 'billing_reference': billing_reference, } organisation = Organisation(**data) dao_create_organisation(organisation) for domain in domains or []: create_domain(domain, organisation.id) return organisation def create_invited_org_user(organisation, invited_by, email_address='[email protected]'): invited_org_user = InvitedOrganisationUser( email_address=email_address, invited_by=invited_by, organisation=organisation, ) save_invited_org_user(invited_org_user) return invited_org_user def create_daily_sorted_letter(billing_day=None, file_name="Notify-20180118123.rs.txt", unsorted_count=0, sorted_count=0): daily_sorted_letter = DailySortedLetter( billing_day=billing_day or date(2018, 1, 18), file_name=file_name, unsorted_count=unsorted_count, sorted_count=sorted_count ) db.session.add(daily_sorted_letter) db.session.commit() return daily_sorted_letter def create_ft_billing(bst_date, template, *, provider='test', rate_multiplier=1, international=False, rate=0, billable_unit=1, notifications_sent=1, postage='none' ): data = FactBilling(bst_date=bst_date, service_id=template.service_id, template_id=template.id, notification_type=template.template_type, provider=provider, rate_multiplier=rate_multiplier, international=international, rate=rate, billable_units=billable_unit, notifications_sent=notifications_sent, postage=postage) db.session.add(data) db.session.commit() return data def create_ft_notification_status( bst_date, notification_type='sms', service=None, template=None, job=None, key_type='normal', notification_status='delivered', count=1 ): if job: template = job.template if template: service = template.service notification_type = template.template_type else: if not service: service = create_service() template = create_template(service=service, template_type=notification_type) data = FactNotificationStatus( bst_date=bst_date, template_id=template.id, service_id=service.id, job_id=job.id if job else uuid.UUID(int=0), notification_type=notification_type, key_type=key_type, 
notification_status=notification_status, notification_count=count ) db.session.add(data) db.session.commit() return data def create_process_time(bst_date='2021-03-01', messages_total=35, messages_within_10_secs=34): data = FactProcessingTime( bst_date=bst_date, messages_total=messages_total, messages_within_10_secs=messages_within_10_secs ) fact_processing_time_dao.insert_update_processing_time(data) def create_service_guest_list(service, email_address=None, mobile_number=None): if email_address: guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, email_address) elif mobile_number: guest_list_user = ServiceGuestList.from_string(service.id, MOBILE_TYPE, mobile_number) else: guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, '[email protected]') db.session.add(guest_list_user) db.session.commit() return guest_list_user def create_complaint(service=None, notification=None, created_at=None): if not service: service = create_service() if not notification: template = create_template(service=service, template_type='email') notification = create_notification(template=template) complaint = Complaint(notification_id=notification.id, service_id=service.id, ses_feedback_id=str(uuid.uuid4()), complaint_type='abuse', complaint_date=datetime.utcnow(), created_at=created_at if created_at else datetime.now() ) db.session.add(complaint) db.session.commit() return complaint def ses_complaint_callback_malformed_message_id(): return { 'Signature': 'bb', 'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde', 'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com', 'TopicArn': 'arn:ses_notifications', 'Type': 'Notification', 'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None, 'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"[email protected]"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","badMessageId":"ref1","destination":["[email protected]"]}}', # noqa 'SigningCertUrl': 'https://sns.pem' } def ses_complaint_callback_with_missing_complaint_type(): """ https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object """ return { 'Signature': 'bb', 'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde', 'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com', 'TopicArn': 'arn:ses_notifications', 'Type': 'Notification', 'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None, 'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"[email protected]"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","messageId":"ref1","destination":["[email protected]"]}}', # noqa 'SigningCertUrl': 'https://sns.pem' } def ses_complaint_callback(): """ https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object """ return { 'Signature': 'bb', 'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': 
'98c6e927-af5d-5f3b-9522-bab736f2cbde', 'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com', 'TopicArn': 'arn:ses_notifications', 'Type': 'Notification', 'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None, 'Message': '{"notificationType":"Complaint","complaint":{"complaintFeedbackType": "abuse", "complainedRecipients":[{"emailAddress":"[email protected]"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","messageId":"ref1","destination":["[email protected]"]}}', # noqa 'SigningCertUrl': 'https://sns.pem' } def ses_notification_callback(): return '{\n "Type" : "Notification",\n "MessageId" : "ref1",' \ '\n "TopicArn" : "arn:aws:sns:eu-west-1:123456789012:testing",' \ '\n "Message" : "{\\"notificationType\\":\\"Delivery\\",' \ '\\"mail\\":{\\"timestamp\\":\\"2016-03-14T12:35:25.909Z\\",' \ '\\"source\\":\\"[email protected]\\",' \ '\\"sourceArn\\":\\"arn:aws:ses:eu-west-1:123456789012:identity/testing-notify\\",' \ '\\"sendingAccountId\\":\\"123456789012\\",' \ '\\"messageId\\":\\"ref1\\",' \ '\\"destination\\":[\\"[email protected]\\"]},' \ '\\"delivery\\":{\\"timestamp\\":\\"2016-03-14T12:35:26.567Z\\",' \ '\\"processingTimeMillis\\":658,' \ '\\"recipients\\":[\\"[email protected]\\"],' \ '\\"smtpResponse\\":\\"250 2.0.0 OK 1457958926 uo5si26480932wjc.221 - gsmtp\\",' \ '\\"reportingMTA\\":\\"a6-238.smtp-out.eu-west-1.amazonses.com\\"}}",' \ '\n "Timestamp" : "2016-03-14T12:35:26.665Z",\n "SignatureVersion" : "1",' \ '\n "Signature" : "X8d7eTAOZ6wlnrdVVPYanrAlsX0SMPfOzhoTEBnQqYkrNWTqQY91C0f3bxtPdUhUt' \ 'OowyPAOkTQ4KnZuzphfhVb2p1MyVYMxNKcBFB05/qaCX99+92fjw4x9LeUOwyGwMv5F0Vkfi5qZCcEw69uVrhYL' \ 'VSTFTrzi/yCtru+yFULMQ6UhbY09GwiP6hjxZMVr8aROQy5lLHglqQzOuSZ4KeD85JjifHdKzlx8jjQ+uj+FLzHXPMA' \ 'PmPU1JK9kpoHZ1oPshAFgPDpphJe+HwcJ8ezmk+3AEUr3wWli3xF+49y8Z2anASSVp6YI2YP95UT8Rlh3qT3T+V9V8rbSVislxA==",' \ '\n "SigningCertURL" : "https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-bb750' \ 'dd426d95ee9390147a5624348ee.pem",' \ '\n "UnsubscribeURL" : "https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&S' \ 'subscriptionArn=arn:aws:sns:eu-west-1:302763885840:preview-emails:d6aad3ef-83d6-4cf3-a470-54e2e75916da"\n}' def create_service_data_retention( service, notification_type='sms', days_of_retention=3 ): data_retention = insert_service_data_retention( service_id=service.id, notification_type=notification_type, days_of_retention=days_of_retention ) return data_retention def create_invited_user(service=None, to_email_address=None): if service is None: service = create_service() if to_email_address is None: to_email_address = '[email protected]' from_user = service.users[0] data = { 'service': service, 'email_address': to_email_address, 'from_user': from_user, 'permissions': 'send_messages,manage_service,manage_api_keys', 'folder_permissions': [str(uuid.uuid4()), str(uuid.uuid4())] } invited_user = InvitedUser(**data) save_invited_user(invited_user) return invited_user def create_template_folder(service, name='foo', parent=None): tf = TemplateFolder(name=name, service=service, parent=parent) db.session.add(tf) db.session.commit() return tf def create_letter_branding(name='HM Government', filename='hm-government'): test_domain_branding = LetterBranding(name=name, filename=filename, ) db.session.add(test_domain_branding) 
db.session.commit() return test_domain_branding def set_up_usage_data(start_date): year = int(start_date.strftime('%Y')) one_week_earlier = start_date - timedelta(days=7) two_days_later = start_date + timedelta(days=2) one_week_later = start_date + timedelta(days=7) one_month_later = start_date + timedelta(days=31) # service with sms and letters: service_1_sms_and_letter = create_service( service_name='a - with sms and letter', purchase_order_number="service purchase order number", billing_contact_names="service billing contact names", billing_contact_email_addresses="[email protected] [email protected]", billing_reference="service billing reference" ) letter_template_1 = create_template(service=service_1_sms_and_letter, template_type='letter') sms_template_1 = create_template(service=service_1_sms_and_letter, template_type='sms') create_annual_billing( service_id=service_1_sms_and_letter.id, free_sms_fragment_limit=10, financial_year_start=year ) org_1 = create_organisation( name="Org for {}".format(service_1_sms_and_letter.name), purchase_order_number="org1 purchase order number", billing_contact_names="org1 billing contact names", billing_contact_email_addresses="[email protected] [email protected]", billing_reference="org1 billing reference" ) dao_add_service_to_organisation( service=service_1_sms_and_letter, organisation_id=org_1.id ) create_ft_billing(bst_date=one_week_earlier, template=sms_template_1, billable_unit=2, rate=0.11) create_ft_billing(bst_date=start_date, template=sms_template_1, billable_unit=2, rate=0.11) create_ft_billing(bst_date=two_days_later, template=sms_template_1, billable_unit=1, rate=0.11) create_ft_billing(bst_date=one_week_later, template=letter_template_1, notifications_sent=2, billable_unit=1, rate=.35, postage='first') create_ft_billing(bst_date=one_month_later, template=letter_template_1, notifications_sent=4, billable_unit=2, rate=.45, postage='second') create_ft_billing(bst_date=one_week_later, template=letter_template_1, notifications_sent=2, billable_unit=2, rate=.45, postage='second') # service with emails only: service_with_emails = create_service(service_name='b - emails') email_template = create_template(service=service_with_emails, template_type='email') org_2 = create_organisation( name='Org for {}'.format(service_with_emails.name), ) dao_add_service_to_organisation(service=service_with_emails, organisation_id=org_2.id) create_ft_billing(bst_date=start_date, template=email_template, notifications_sent=10) # service with letters: service_with_letters = create_service(service_name='c - letters only') letter_template_3 = create_template(service=service_with_letters, template_type='letter') org_for_service_with_letters = create_organisation( name="Org for {}".format(service_with_letters.name), purchase_order_number="org3 purchase order number", billing_contact_names="org3 billing contact names", billing_contact_email_addresses="[email protected] [email protected]", billing_reference="org3 billing reference" ) dao_add_service_to_organisation(service=service_with_letters, organisation_id=org_for_service_with_letters.id) create_ft_billing(bst_date=start_date, template=letter_template_3, notifications_sent=2, billable_unit=3, rate=.50, postage='first') create_ft_billing(bst_date=one_week_later, template=letter_template_3, notifications_sent=8, billable_unit=5, rate=.65, postage='second') create_ft_billing(bst_date=one_month_later, template=letter_template_3, notifications_sent=12, billable_unit=5, rate=.65, postage='second') # service with letters, 
without an organisation: service_with_letters_without_org = create_service(service_name='d - service without org') letter_template_4 = create_template(service=service_with_letters_without_org, template_type='letter') create_ft_billing(bst_date=two_days_later, template=letter_template_4, notifications_sent=7, billable_unit=4, rate=1.55, postage='rest-of-world') create_ft_billing(bst_date=two_days_later, template=letter_template_4, notifications_sent=8, billable_unit=4, rate=1.55, postage='europe') create_ft_billing(bst_date=two_days_later, template=letter_template_4, notifications_sent=2, billable_unit=1, rate=.35, postage='second') create_ft_billing(bst_date=two_days_later, template=letter_template_4, notifications_sent=1, billable_unit=1, rate=.50, postage='first') # service with chargeable SMS, without an organisation service_with_sms_without_org = create_service( service_name='b - chargeable sms', purchase_order_number="sms purchase order number", billing_contact_names="sms billing contact names", billing_contact_email_addresses="[email protected] [email protected]", billing_reference="sms billing reference" ) sms_template = create_template(service=service_with_sms_without_org, template_type='sms') create_annual_billing( service_id=service_with_sms_without_org.id, free_sms_fragment_limit=10, financial_year_start=year ) create_ft_billing(bst_date=one_week_earlier, template=sms_template, rate=0.11, billable_unit=12) create_ft_billing(bst_date=two_days_later, template=sms_template, rate=0.11) create_ft_billing(bst_date=one_week_later, template=sms_template, billable_unit=2, rate=0.11) # service with SMS within free allowance service_with_sms_within_allowance = create_service( service_name='e - sms within allowance' ) sms_template_2 = create_template(service=service_with_sms_within_allowance, template_type='sms') create_annual_billing( service_id=service_with_sms_within_allowance.id, free_sms_fragment_limit=10, financial_year_start=year ) create_ft_billing(bst_date=one_week_later, template=sms_template_2, billable_unit=2, rate=0.11) # dictionary with services and orgs to return return { "org_1": org_1, "service_1_sms_and_letter": service_1_sms_and_letter, "org_2": org_2, "service_with_emails": service_with_emails, "org_for_service_with_letters": org_for_service_with_letters, "service_with_letters": service_with_letters, "service_with_letters_without_org": service_with_letters_without_org, "service_with_sms_without_org": service_with_sms_without_org, "service_with_sms_within_allowance": service_with_sms_within_allowance, } def create_returned_letter(service=None, reported_at=None, notification_id=None): if not service: service = create_service(service_name='a - with sms and letter') returned_letter = ReturnedLetter( service_id=service.id, reported_at=reported_at or datetime.utcnow(), notification_id=notification_id or uuid.uuid4(), created_at=datetime.utcnow(), ) db.session.add(returned_letter) db.session.commit() return returned_letter def create_service_contact_list( service=None, original_file_name='EmergencyContactList.xls', row_count=100, template_type='email', created_by_id=None, archived=False, ): if not service: service = create_service(service_name='service for contact list', user=create_user()) contact_list = ServiceContactList( service_id=service.id, original_file_name=original_file_name, row_count=row_count, template_type=template_type, created_by_id=created_by_id or service.users[0].id, created_at=datetime.utcnow(), archived=archived, ) db.session.add(contact_list) 
db.session.commit() return contact_list def create_broadcast_message( template=None, *, service=None, # only used if template is not provided created_by=None, personalisation=None, content=None, status=BroadcastStatusType.DRAFT, starts_at=None, finishes_at=None, areas=None, stubbed=False ): if template: service = template.service template_id = template.id template_version = template.version personalisation = personalisation or {} content = template._as_utils_template_with_personalisation( personalisation ).content_with_placeholders_filled_in elif content: template_id = None template_version = None personalisation = None content = content else: pytest.fail('Provide template or content') broadcast_message = BroadcastMessage( service_id=service.id, template_id=template_id, template_version=template_version, personalisation=personalisation, status=status, starts_at=starts_at, finishes_at=finishes_at, created_by_id=created_by.id if created_by else service.created_by_id, areas=areas or {'areas': [], 'simple_polygons': []}, content=content, stubbed=stubbed ) db.session.add(broadcast_message) db.session.commit() return broadcast_message def create_broadcast_event( broadcast_message, sent_at=None, message_type='alert', transmitted_content=None, transmitted_areas=None, transmitted_sender=None, transmitted_starts_at=None, transmitted_finishes_at=None, ): b_e = BroadcastEvent( service=broadcast_message.service, broadcast_message=broadcast_message, sent_at=sent_at or datetime.utcnow(), message_type=message_type, transmitted_content=transmitted_content or {'body': 'this is an emergency broadcast message'}, transmitted_areas=transmitted_areas or broadcast_message.areas, transmitted_sender=transmitted_sender or 'www.notifications.service.gov.uk', transmitted_starts_at=transmitted_starts_at, transmitted_finishes_at=transmitted_finishes_at or datetime.utcnow() + timedelta(hours=24), ) db.session.add(b_e) db.session.commit() return b_e def create_broadcast_provider_message( broadcast_event, provider, status='sending' ): broadcast_provider_message_id = uuid.uuid4() provider_message = BroadcastProviderMessage( id=broadcast_provider_message_id, broadcast_event=broadcast_event, provider=provider, status=status, ) db.session.add(provider_message) db.session.commit() provider_message_number = None if provider == BroadcastProvider.VODAFONE: provider_message_number = BroadcastProviderMessageNumber( broadcast_provider_message_id=broadcast_provider_message_id) db.session.add(provider_message_number) db.session.commit() return provider_message def create_webauthn_credential( user, name='my key', *, credential_data='ABC123', registration_response='DEF456', ): webauthn_credential = WebauthnCredential( user=user, name=name, credential_data=credential_data, registration_response=registration_response ) db.session.add(webauthn_credential) db.session.commit() return webauthn_credential
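# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original file): a
# minimal example of how the fixture helpers above might be combined in a
# test. The import path follows the file's location (tests/app/db.py); the
# test name, the `notify_db_session` fixture, and all literal values are
# illustrative assumptions rather than anything defined in this module.
# ---------------------------------------------------------------------------
from tests.app.db import (
    create_notification,
    create_service,
    create_template,
    create_user,
)


def test_example_creates_a_delivered_sms(notify_db_session):  # assumed DB fixture
    # Build the object graph bottom-up: user -> service -> template -> notification.
    user = create_user(email='[email protected]')
    service = create_service(user=user, service_name='Example service',
                             check_if_service_exists=True)
    template = create_template(service, template_type='sms',
                               content='Hello ((name))')
    notification = create_notification(
        template=template,
        to_field='+447700900123',
        status='delivered',
        personalisation={'name': 'Alice'},
    )

    # The helpers persist what they create, so attributes can be asserted on directly.
    assert notification.service_id == service.id
    assert notification.status == 'delivered'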
mit
-4,070,065,704,934,377,500
33.794118
544
0.630483
false
stephen-hoover/Arignote
arignote/nnets/nets.py
1
40064
"""This module describes fully-functioning networks created from the pieces in `layer`. """ from __future__ import division, print_function import collections import inspect import numpy as np import six import theano import theano.tensor as T from theano.tensor.shared_randomstreams import RandomStreams from ..data import files from ..data import readers from ..nnets import layers from ..nnets import training from ..util import misc from ..util import netlog log = netlog.setup_logging("nets", level="INFO") def define_logistic_regression(n_classes, l1_reg=0, l2_reg=0): """Shortcut to build the list of layer definitions (a single layer, in this case) for a logistic regression classifier. Parameters ---------- n_classes : int Number of classes to calculate probabilities for l1_reg, l2_reg : float, optional L1 and L2 regularization strengths Returns ------- list Layer definitions suitable for input to a `NNClassifier` """ # This network is only an output layer. layer_defs = [["ClassificationOutputLayer", {"n_classes": n_classes, "l1": l1_reg, "l2": l2_reg}]] return layer_defs def define_cnn(n_classes, input_image_shape, n_kernels, filter_scale, poolsize, n_hidden, dropout_p, activation="relu", l1_reg=0, l2_reg=0): """Shortcut to build the list of layer definitions for a convolutional neural network Defines a series of convolutional layers, followed by max-pooling layers, after which a multi-layer perceptron calculates the probabilities of membership in each class. Parameters ---------- n_classes : int Number of classes to calculate probabilities for input_image_shape : list or tuple Shape of input image, (n_channels, n_pixels_x, n_pixels_y) n_kernels : list of ints Number of convolutional kernels in each convolutional layer filter_scale : list of ints Size of (square) filters in each convolutional layer. Must be the same length as `n_kernels`. poolsize : list of ints Size of (square) non-overlapping max-pooling kernel to be applied after each convolutional layer (may be zero, meaning no max pooling after that layer). Must be the same length as `n_kernels`. n_hidden : list of ints Number of units in each hidden layer dropout_p : float or list of floats Dropout fraction for input and each hidden layer. If a single float, this dropout fraction will be applied to every layer. activation : {"relu", "prelu", "sigmoid", "tanh", "abstanh", "linear"} Activation function to use for all layers l1_reg, l2_reg : float, optional L1 and L2 regularization strengths for all layers Returns ------- list Layer definitions suitable for input to a `NNClassifier` Examples -------- >>> layers = define_cnn(10, (28, 28), n_kernels=[32, 32], filter_scale=[4, 3], >>> poolsize=[0, 2], n_hidden=[400], dropout_p=0.2) >>> print(layers) [['InputImageLayer', {'n_images': 1, 'n_pixels': [28, 28], 'name': 'input'}], ['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-input'}], ['ConvLayer', {'activation': 'relu', 'filter_shape': (4, 4), 'n_output_maps': 32, 'name': 'conv0'}], ['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-conv0'}], ['ConvLayer', {'activation': 'relu', 'filter_shape': (3, 3), 'n_output_maps': 32, 'name': 'conv1'}], ['MaxPool2DLayer', {'name': 'maxpool1', 'pool_shape': (2, 2)}], ['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-conv1'}], ['FCLayer', {'activation': 'relu', 'l1': 0, 'l2': 0, 'n_units': 400, 'name': 'fc0'}], ['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-fc0'}], ['ClassificationOutputLayer', {'l1': 0, 'l2': 0, 'n_classes': 10}]] """ # Assume input images are 2D. 
If the `input_image_shape` is 3 elements, # the first element is the number of images in the input. Otherwise, assume # that there's only one image in the input. if len(input_image_shape) == 3: pass elif len(input_image_shape) == 2: input_image_shape = [1] + list(input_image_shape) else: raise ValueError("The input image shape must be (n_channels, n_pixels_x, n_pixels_y).") try: # Make sure that `n_hidden` is a list. len(n_hidden) except TypeError: n_hidden = [n_hidden] try: # Make sure that `dropout_p` is a list. len(dropout_p) except TypeError: dropout_p = (1 + len(n_hidden) + len(n_kernels)) * [dropout_p] if len(dropout_p) != len(n_kernels) + len(n_hidden) + 1: raise ValueError("Either specify one dropout for all layers or one dropout for " "each layer (inputs + hidden layers).") dropout_p = dropout_p[::-1] # Pops come from the end, so reverse this list. # Start by putting on the input layer. layer_defs = [["InputImageLayer", {"name": "input", "n_images": input_image_shape[0], "n_pixels": input_image_shape[1:]}]] input_do = dropout_p.pop() if input_do: layer_defs.append(["DropoutLayer", {"name": "DO-input", "dropout_p": input_do}]) # Add convolutional layers. for i_conv, (kernels, filter, pool) in enumerate(zip(n_kernels, filter_scale, poolsize)): layer_defs.append(["ConvLayer", {"name": "conv{}".format(i_conv), "n_output_maps": kernels, "filter_shape": (filter, filter), "activation": activation}]) if pool: layer_defs.append(["MaxPool2DLayer", {"name": "maxpool{}".format(i_conv), "pool_shape": (pool, pool)}]) layer_do = dropout_p.pop() if layer_do: layer_defs.append(["DropoutLayer", {"name": "DO-conv{}".format(i_conv), "dropout_p": layer_do}]) # Add fully-connected layers. for i_hidden, hidden in enumerate(n_hidden): layer_defs.append(["FCLayer", {"name": "fc{}".format(i_hidden), "n_units": hidden, "activation": activation, "l1": l1_reg, "l2": l2_reg}]) layer_do = dropout_p.pop() if layer_do: layer_defs.append(["DropoutLayer", {"name": "DO-fc{}".format(i_hidden), "dropout_p": layer_do}]) # Put on an output layer. layer_defs.append(["ClassificationOutputLayer", {"n_classes": n_classes, "l1": l1_reg, "l2": l2_reg}]) return layer_defs def define_mlp(n_classes, n_hidden, dropout_p, activation="relu", l1_reg=0, l2_reg=0): """Shortcut to create a multi-layer perceptron classifier Parameters ---------- n_classes : int Number of classes to calculate probabilities for n_hidden : list of ints Number of units in each hidden layer dropout_p : float or list of floats Dropout fraction for input and each hidden layer. If a single float, this dropout fraction will be applied to every layer. activation : {"relu", "prelu", "sigmoid", "tanh", "abstanh", "linear"} Activation function to use for all layers l1_reg, l2_reg : float, optional L1 and L2 regularization strengths for all layers Returns ------- list Layer definitions suitable for input to a `NNClassifier` Examples -------- >>> layers = define_mlp(10, [400, 400], [0.4, 0.25, 0.25], "prelu", l2_reg=1e-4) >>> print(layers) [['DropoutLayer', {'dropout_p': 0.4, 'name': 'DO-input'}], ['FCLayer', {'activation': 'prelu', 'l1': 0, 'l2': 0.0001, 'n_units': 400, 'name': 'fc0'}], ['DropoutLayer', {'dropout_p': 0.25, 'name': 'DO-fc0'}], ['FCLayer', {'activation': 'prelu', 'l1': 0, 'l2': 0.0001, 'n_units': 400, 'name': 'fc1'}], ['DropoutLayer', {'dropout_p': 0.25, 'name': 'DO-fc1'}], ['ClassificationOutputLayer', {'l1': 0, 'l2': 0.0001, 'n_classes': 10, 'name': 'output'}]] """ try: # Make sure that `n_hidden` is a list. 
len(n_hidden) except TypeError: n_hidden = [n_hidden] try: # Make sure that `dropout_p` is a list. len(dropout_p) except TypeError: dropout_p = (1 + len(n_hidden)) * [dropout_p] if len(dropout_p) != len(n_hidden) + 1: raise ValueError("Either specify one dropout for all layers or one dropout for " "each layer (inputs + hidden layers).") dropout_p = dropout_p[::-1] # Pops come from the end, so reverse this list. # Start by putting on dropout for the input layer (if any). layer_defs = [] input_do = dropout_p.pop() if input_do: layer_defs.append(["DropoutLayer", {"name": "DO-input", "dropout_p": input_do}]) # Add fully-connected layers. for i_hidden, hidden in enumerate(n_hidden): layer_defs.append(["FCLayer", {"name": "fc{}".format(i_hidden), "n_units": hidden, "activation": activation, "l1": l1_reg, "l2": l2_reg}]) layer_do = dropout_p.pop() if layer_do: layer_defs.append(["DropoutLayer", {"name": "DO-fc{}".format(i_hidden), "dropout_p": layer_do}]) # Put on an output layer. layer_defs.append(["ClassificationOutputLayer", {"name": "output", "n_classes": n_classes, "l1": l1_reg, "l2": l2_reg}]) return layer_defs class NNClassifier(object): r"""A neural net to be used for a classification task. The classification network is built from individual layers. Compilation doesn't happen until necessary at training time. This object can be pickled and unpickled; the entire state of the object will be stored. .. note:: After unpickling, the network will need to be compiled (either through `fit` or by calling `compile` directly) before it can be used. Parameters ---------- layer_defs : list Definition of the network layers. This should be a list of lists. name : str, optional Name of this neural network, for display purposes n_in : int or tuple, optional The shape of the input features. If supplied here, we'll initialize the network layers now. Otherwise, this will be inferred from the data supplied during a call to `fit` and the network layers will be constructed at that time. batch_size : int, optional Batch size to be used for training. Only needed now if `n_in` is also supplied -- it can be used to optimize convolutional layers on the CPU. random_state : int or np.random.RandomState, optional RNG or seed for a RNG. If not supplied, will be randomly initialized. Other Parameters ---------------- stored_network : str, optional Filename of pickled network. If supplied, initialize this object's layers from weights stored in the `stored_network`. The pickled network must have the same architecure as this network. theano_rng : theano.tensor.shared_randomstreams import RandomStreams, optional Symbolic random number generator. If not supplied, will be initialized from the numpy RNG. Attributes ---------- predict_proba : function Input batch of examples, output probabilities of each class for each example. Compiled by theano. predict : function Input batch of examples, output class with maximum probability for each example. Compiled by theano. layers_train : list List of `Layer` objects. Potentially non-deterministic; used for training. layers_inf : list Network used for inference, deterministic. Identical architecture to and shares parameters with `layers_train`. 
params : list All trainable parameters (theano shared variables) from this network param_update_rules : list All special update rules, one dictionary per parameter in `params` n_params : int Total number of individual trainable parameters trainer : training.SupervisedTraining Object used to train this network; present after calling `fit` Examples -------- >>> layers = [["FCLayer", {"name": "fc1", "n_units": 100, "activation": "relu", "l2": 0.001}], ["DropoutLayer", {"name": "DO-fc1", "dropout_p": 0.5}], ["ClassificationOutputLayer", {"name": "output", "n_classes": 10}]] >>> cls = NNClassifier(layers, name="Small example net", random_state=42) """ def __init__(self, layer_defs, name="Neural Network Classifier", n_in=None, batch_size=None, random_state=None, stored_network=None, theano_rng=None): self.input = None self.trainer = None self.n_in = n_in self.layer_defs = layer_defs self.batch_size = batch_size self.stored_network = stored_network self.name = name self.layers_train, self.layers_inf = [], [] self.l1, self.l2_sqr = 0, 0 self.params, self.param_update_rules, self.n_params = [], [], 0 if type(layer_defs) != list: raise TypeError("Please input a list of layer definitions.") self.set_rng(random_state, theano_rng) # Sets instance attributes `self.random_state` and `self.theano_rng`. self.pickled_theano_rng = None # Use this to restore previous parameters. # Define these Theano functions during the `compile` stage. self.p_y_given_x = None self.predict_proba = None self.predict = None if self.n_in is not None: self._build_network(self.n_in, batch_size) def _build_network(self, n_in, batch_size=None): """Create and store the layers of this network, along with auxiliary information such as lists of the trainable parameters in the network.""" self.n_in = np.atleast_1d(n_in) # Make sure that `n_in` is a list or tuple. if batch_size is not None: self.batch_size = batch_size # These next attributes are creating and storing Theano shared variables. # The Layers contain shared variables for all the trainable parameters, # and the regularization parameters are sums and products of the parameters. self.layers_train = self._build_layers_train(self.layer_defs, self.stored_network) self.layers_inf = self._duplicate_layer_stack(self.layers_train) self.l1, self.l2_sqr = self._get_regularization(self.layers_train) # Collect the trainable parameters from each layer and arrange them into lists. self.params, self.param_update_rules, self.n_params = self._arrange_parameters(self.layers_train) log.info("This network has {} trainable parameters.".format(self.n_params)) def _arrange_parameters(self, layers): """Extract all trainable parameters and any special update rules from each Layer. Also calculate the total number of trainable parameters in this network. Returns ------- A 3-tuple of (parameters, parameter update rules, and number of parameters). The first two elements are lists of equal length, and the number of parameters is an integer. """ # The parameters of the model are the parameters of the two layers it is made out of. params, param_update_rules = [], [] for ly in layers: params += ly.params param_update_rules += ly.param_update_rules # Calculate the total number of trainable parameters in this network. n_params = int(np.sum([np.sum([np.prod(param.get_value().shape) for param in layer.params]) for layer in layers if not getattr(layer, "fix_params", False)])) return params, param_update_rules, n_params def _get_regularization(self, layers): """Find the L1 and L2 regularization terms for this net. 
Combine the L1 and L2 terms from each Layer. Use the regularization strengths stored in each Layer. Note that the value returned is `l2_sqr`, the sum of squares of all weights, times the lambda parameter for each Layer. Returns ------- l1, l2_sqr : theano.shared The `l1` is the sum of absolute values of weights times lambda_l1 from each Layer, and `l2_sqr` is the sum of squares of weights times lambda_l2 from each Layer. """ # L1 norm; one regularization option is to require the L1 norm to be small. l1 = np.sum([ly.l1 for ly in layers if ly.l1 is not None]) if not l1: log.debug("No L1 regularization in this model.") l1 = theano.shared(np.cast[theano.config.floatX](0), "zero") # Square of the L2 norm; one regularization option is to require the # square of the L2 norm to be small. l2_sqr = np.sum([ly.l2_sqr for ly in layers if ly.l2_sqr is not None]) if not l2_sqr: log.debug("No L2 regularization in this model.") l2_sqr = theano.shared(np.cast[theano.config.floatX](0), "zero") return l1, l2_sqr def _build_layers_train(self, layer_defs, stored_network=None): """Creates a stack of neural network layers from the input layer definitions. This network is intended for use in training. **Parameters** * `layer_defs` <list> A list of Layer definitions. May contain Layers, in which case they're added directly to the list of output Layers. **Optional Parameters** * `stored_network` <str|None> A filename containing a previously stored neural network. If any layer definitions specify that they should be initialized with weights from an existing network, use the weights in the `stored_network`. **Returns** A list of initialized (but not compiled) neural network Layers. **Modifies** None """ if stored_network is not None: log.info('Reading weights from an existing network at "{}".'.format(stored_network)) stored_network = collections.OrderedDict(files.read_pickle(stored_network)["params"]) log.info("Building the \"{}\" network.".format(self.name)) if isinstance(layer_defs[0], layers.InputLayer): layer_objs = [] else: # Initialize the layers with an input layer, if we don't have one already. layer_objs = [layers.InputLayer(self.n_in, name="input")] for ly in layer_defs: if isinstance(ly, layers.Layer): # If this is already a Layer object, don't try to re-create it. layer_objs.append(ly) else: prev_ly = layer_objs[-1] if len(ly) == 1: ly.append({}) # No extra layer arguments. layer_name = ly[0] if not layer_name.endswith("Layer"): # All class names end with "Layer". layer_name += "Layer" if ((layer_name.startswith("BC01ToC01B") or layer_name.startswith("C01BToBC01")) and theano.config.device == "cpu"): log.warning("Skipping \"{}\" reshuffling layer for " "CPU training.".format(layer_name)) continue layer_kwargs = ly[1].copy() init_from = layer_kwargs.pop("load_params", False) if init_from: if init_from not in stored_network: raise ValueError("Couldn't find weights for layer {} in the input " "weights.".format(init_from)) layer_type = getattr(layers, layer_name) if "batch_size" in inspect.getargspec(layer_type.__init__).args: layer_kwargs.setdefault("batch_size", self.batch_size) layer_objs.append(layer_type(n_in=prev_ly.n_out, rng=self.rng, theano_rng=self.theano_rng, **layer_kwargs)) log.info("Added layer: {}".format(str(layer_objs[-1]))) if init_from: # Copy weights from the input file into this layer. 
for param, input_params in zip(layer_objs[-1].params, stored_network[init_from]): param.set_value(input_params[1], borrow=True) log.info("Copied input parameters from layer {} to layer " "{}.".format(init_from, layer_objs[-1].name)) return layer_objs def _duplicate_layer_stack(self, layer_stack): """Creates a stack of neural network Layers identical to the input `layer_stack`, and with weights tied to those Layers. This is useful to, for example, create a parallel network to be used for inference. **Parameters** * `layer_stack` <list of Layers> A list of initialized Layers. **Returns** A list of initialized (but not compiled) neural network Layers. **Modifies** None """ layer_objs = [] for i_ly, ly in enumerate(layer_stack): layer_type = type(ly) layer_kwargs = ly.get_params() # Construct a parallel network for inference. Tie the weights to the training network. layer_kwargs.update(layer_stack[i_ly].get_trainable_params()) layer_objs.append(layer_type(rng=self.rng, theano_rng=self.theano_rng, **layer_kwargs)) return layer_objs def get_loss(self, name, targets=None, inference=False, regularized=None): """Return a loss function. Parameters ---------- name : str Name of the loss function. One of ["nll", "error"]. May also be a list, in which case this function will return a list of loss functions. targets : theano symbolic variable, optional If None, will be initialized to a T.imatrix named "y". inference : bool, optional If True, return the loss from the inference network (for e.g. model validation). Otherwise use the training network. regularized : bool, optional Add regularization parameters to the loss? Default to True if `inference` is False and False if `inference` is True. Returns ------- Theano symbolic variable Represents the requested loss, or a list of symbolic variables if `name` is list-like. """ if self.input is None: raise RuntimeError("Compile this network before getting a loss function.") if regularized is None: regularized = not inference # If we got a list as input, return a list of loss functions. if misc.is_listlike(name): return [self.get_loss(n, targets=targets, inference=inference, regularized=regularized) for n in name] input_name = name name = name.lower() if name == "nll": name = "negative_log_likelihood" name = name.replace(" ", "_") if inference: output_layer = self.layers_inf[-1] else: output_layer = self.layers_train[-1] # Look for the cost function in the output layer. if not hasattr(output_layer, name): raise ValueError("Unrecognized loss function: \"{}\".".format(input_name)) if targets is None: targets = T.imatrix("y") # Labels, presented as 2D array of [int] labels loss = getattr(output_layer, name)(targets) if regularized: loss = loss + self.l1 + self.l2_sqr return loss def compile(self, input, recompile=False): """Compile the theano computation graphs and functions associated with this network. Parameters ---------- input : Theano symbolic variable The input to the network recompile : bool, optional If False, will not recompile an already-compiled network. """ if self.input is not None: if recompile: log.warning("Recompiling and resetting the existing network.") else: log.debug("This object already compiled. 
Not recompiling.") return self.input = input log.info("Compiling the \"{}\" training network.".format(self.name)) prev_output = input for ly in self.layers_train: ly.compile(prev_output) ly.compile_activations(self.input) prev_output = ly.output log.info("Compiling the \"{}\" inference network.".format(self.name)) prev_output = input for ly in self.layers_inf: ly.compile(prev_output) ly.compile_activations(self.input) prev_output = ly.output_inf # Allow predicting on fresh features. self.p_y_given_x = self.layers_inf[-1].p_y_given_x self.predict_proba = theano.function(inputs=[self.input], outputs=self.p_y_given_x) self.predict = theano.function(inputs=[self.input], outputs=self.layers_inf[-1].y_pred) # Now that we've compiled the network, we can restore a previous # Theano RNG state, if any. The "pickled_theano_rng" will only be # non-None if this object was unpickled. self._set_theano_rng(self.pickled_theano_rng) self.pickled_theano_rng = None def get_init_params(self): return dict(n_in=self.n_in, layer_defs=self.layer_defs, name=self.name, batch_size=self.batch_size, stored_network=self.stored_network) def set_trainable_params(self, inp, layers=None): """Set the trainable parameters in this network from trainable parameters in an input. Parameters ---------- inp : NNClassifier or string May be an existing NNClassifier, or a filename pointing to either a checkpoint or a pickled NNClassifier. layers : list of strings, optional If provided, set parameters only for the layers with these names, using layers with corresponding names in the input. """ # Get the input and check its type. # If the input is a string, try reading it first as a # checkpoint file, and then as a NNClassifier pickle. if isinstance(inp, six.string_types): try: inp = files.checkpoint_read(inp, get_metadata=False) except files.CheckpointError as err: inp = files.read_pickle(inp) if not isinstance(inp, NNClassifier): raise TypeError("Unable to restore weights from a \"{}\" object.".format(type(inp))) # Go through each layer in this object and set its weights. for ly in self.layers_train: if layers is not None and ly not in layers: continue if ly.has_trainable_params: ly.set_trainable_params(inp.get_layer(ly.name)) log.debug("Set trainable parameters in layer {} " "from input weights.".format(ly.name)) def get_layer(self, name, inf=False): """Returns the Layer object with the given name. Parameters ---------- name : str Name of the desired Layer object inf : bool, optional If True, search the inference (deterministic) Layers, otherwise search the training Layers. """ layers = self.layers_inf if inf else self.layers_train for ly in layers: if ly.name == name: return ly else: raise ValueError("Layer \"{}\" is not present in " "network \"{}\".".format(name, self.name)) def set_rng(self, rng, theano_rng=None): """Set the pseudo-random number generator in this object and in all Layers of this object. Parameters ---------- rng : int or numpy.random.RandomState or `RandomState.get_state()` theano_rng : theano.tensor.shared_randomstreams import RandomStreams, optional If not supplied, will be initialized from the `rng` Modifies -------- `self.rng` and `self.theano_rng` will be set with RNGs. Each Layer in `self.layers_train` and `self.layers_inf` will have their RNGs set to be the same objects as this network's new RNGs. """ # Set up the random number generator, if necessary. if rng is None: log.debug("Making new NNet RNG") rng = np.random.RandomState() elif isinstance(rng, int): # If we got a seed as input. 
log.debug("Setting RNG seed to {}.".format(rng)) rng = np.random.RandomState(rng) elif not isinstance(rng, np.random.RandomState): # Assume that anything else is the state of the RNG. log.debug("Initializing numpy RNG from previous state.") rng_state = rng rng = np.random.RandomState() rng.set_state(rng_state) if theano_rng is None: log.debug("Initializing new Theano RNG.") theano_rng = RandomStreams(rng.randint(2 ** 30)) self.rng = rng self.theano_rng = theano_rng for ly in self.layers_train + self.layers_inf: ly.rng = self.rng ly.theano_rng = self.theano_rng def _set_theano_rng(self, rng_state=None): """Set the current state of the theano_rng from a pickled state. .. note:: This can only be done after compiling the network! The Theano RNG needs to see where it fits in to the graph. http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs """ if rng_state is not None: for (su, input_su) in zip(self.theano_rng.state_updates, rng_state): su[0].set_value(input_su) def __getstate__(self): """Preserve the object's state. Don't try to pickle the Theano objects directly; Theano changes quickly. Store the values of layer weights as arrays instead (handled in the Layers' __getstate__ functions) and clear all compiled functions and symbolic variables. Those will need to be re-compiled before the network can be used again. """ state = self.__dict__.copy() state["p_y_given_x"], state["predict_proba"], state["predict"] = None, None, None state["l1"], state["l2_sqr"] = None, None state["params"], state["param_update_rules"] = None, None state["layers_inf"] = [] # This is redundant with `layers_train`; don't save both. state["rng"] = self.rng.get_state() state["input"] = None # http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs state["pickled_theano_rng"] = [su[0].get_value() for su in self.theano_rng.state_updates] state["theano_rng"] = None return state def __setstate__(self, state): """Allow unpickling from stored weights. """ self.__dict__.update(state) # Reconstruct this object's RNG. # The theano_rng won't be completely reconstructed until we recompile the network. self.set_rng(self.rng, self.theano_rng) # Rebuild everything we had to take apart before saving. Note that we'll # still need to call `compile` to make the network fully operational again. self.layers_inf = self._duplicate_layer_stack(self.layers_train) self.l1, self.l2_sqr = self._get_regularization(self.layers_train) # Collect the trainable parameters from each layer and arrange them into lists. self.params, self.param_update_rules, self.n_params = self._arrange_parameters(self.layers_train) def fit(self, X, y=None, valid=None, test=None, n_epochs=None, batch_size=None, augmentation=None, checkpoint=None, sgd_type="adadelta", lr_rule=None, momentum_rule=None, sgd_max_grad_norm=None, train_loss="nll", valid_loss="nll", test_loss=["error", "nll"], validation_frequency=None, validate_on_train=False, checkpoint_all=False, extra_metadata=None,): """Perform supervised training on the input data. When restoring a pickled `NNClassifier` object to resume training, data, augmentation functions, and checkpoint locations must be re-entered, but other parameters will be taken from the previously stored training state. (The `n_epochs` may be re-supplied to alter the number of epochs used, but will default to the previously supplied `n_epochs`.) Training may be stopped early by pressing ctrl-C. 
Training data may be provided in either of the following formats: - An array of (n_examples, n_features) in the first positional argument (keyed by `X`), and an array of (n_examples, n_labels) in the second positional argument (keyed by `y`) - An object of type `readers.DataWithHoldoutParitions` or `readers.Data` presented in the first positional argument Validation data may be optionally supplied with the `valid` key in one of the following formats (only if the training data were not given as a `readers.DataWithHoldoutParitions` object): - A tuple of (X, y), where `X` is an array of (n_validation_examples, n_features) and `y` is an array of (n_validation_examples, n_labels) - A `readers.Data` object - A float in the range [0, 1), in which case validation data will be held out from the supplied training data (only if training data were given as an array) Test data may be optionally supplied with the `test` key, using the same formats as for validation data. Parameters ---------- X, y, valid, test See above for discussion of allowed input formats. n_epochs : int Train for this many epochs. (An "epoch" is one complete pass through the training data.) Must be supplied unless resuming training. batch_size : int Number of examples in a minibatch. Must be provided if was not given during object construction. augmentation : function, optional Apply this function to each minibatch of training data. checkpoint : str, optional Filename for storing network during training. If supplied, Arignote will store the network after every epoch, as well as storing the network with the best validation loss and the final network. When using a checkpoint, the trainer will restore the network with best validation loss at the end of training. sgd_type : {"adadelta", "nag", "adagrad", "rmsprop", "sgd"} Choice for stochastic gradient descent algorithm to use in training lr_rule, momentum_rule : dict of sgd_updates.Rule params, optional Use these dictionaries of parameters to create Rule objects which describe how to alter the learning rate and momentum during training. train_loss, valid_loss : {"nll", "error"} Loss function for training and validation. With a custom output layer, may also be the name of a function which returns a theano symbolic variable giving the cost. ("nll" = "negative log likelihood") test_loss : str or list May be any of the loss functions usable for training, or a list of such functions. Other Parameters ---------------- sgd_max_grad_norm : float, optional If provided, scale gradients during training so that the norm of all gradients is no more than this value. validation_frequency : int, optional Check the validation loss after training on this many examples. Defaults to validating once per epoch. validate_on_train : bool, optional If set, calculate validation loss (using the deterministic network) on the training set as well. checkpoint_all : str, optional Keep the state of the network at every training step. Warning: may use lots of hard drive space. extra_metadata : dict, optional Store these keys with the pickled object. 
Returns ------- self : NNClassifier Examples -------- >>> lr_rule = {"rule": "stalled", "initial_value": 0.1, "multiply_by": 0.25, "interval": 5} >>> momentum_rule = {"rule": "stalled", "initial_value": 0.7, "decrease_by": -0.1, "final_value": 0.95, "interval": 5} >>> mnist_data = files.read_pickle(sample_data.mnist) >>> classifier.fit(mnist_data[0], n_epochs=50, valid=mnist_data[1], test=mnist_data[2], augmentation=None, checkpoint=checkpoint, sgd_type="nag", lr_rule=lr_rule, momentum_rule=momentum_rule, batch_size=128, train_loss="nll", valid_loss="nll", test_loss=["nll", "error"]) """ if batch_size is None: batch_size = self.batch_size # If the inputs are not `Data` objects, we need to wrap them before # the Trainer can make use of them. train_data = X if y is None else (X, y) train, valid, test = readers.to_data_partitions(train_data, valid, test, batch_size=batch_size) # If we didn't previously know how many features to expect in the input, we can now # build the layers of this neural network. if self.n_in is None: self._build_network(train.features.shape, batch_size=batch_size) if self.trainer is not None: trainer = self.trainer else: trainer = training.SupervisedTraining(sgd_type=sgd_type, lr_rule=lr_rule, momentum_rule=momentum_rule, sgd_max_grad_norm=sgd_max_grad_norm, max_epochs=n_epochs, validation_frequency=validation_frequency, validate_on_train=validate_on_train, train_loss=train_loss, valid_loss=valid_loss, test_loss=test_loss) self.trainer = trainer trained_network = trainer.fit(self, train, n_epochs=n_epochs, valid=valid, test=test, augmentation=augmentation, extra_metadata=extra_metadata, checkpoint=checkpoint, checkpoint_all=checkpoint_all) return trained_network
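# A minimal usage sketch for the classifier defined above, assuming a previously
# pickled NNClassifier and a feature array `X_new` (both placeholders). It relies
# only on names used in this module: `files.read_pickle`, `compile`,
# `predict_proba`, and `predict`.
import theano.tensor as T

net = files.read_pickle("trained_classifier.pkl")  # restore a trained NNClassifier
net.compile(T.matrix("x"))                         # the Theano graph must be recompiled after unpickling
class_probabilities = net.predict_proba(X_new)     # array of shape (n_examples, n_classes)
predicted_labels = net.predict(X_new)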
mit
-1,436,817,007,690,568,200
42.929825
117
0.592202
false
zeffii/BlenderLSystem3D
sverchok_script_node_version/3dlsystem.py
1
6593
import math from math import radians import random from random import randint import ast import bmesh import mathutils from mathutils import Vector, Euler """ lifted from: http://www.4dsolutions.net/ocn/lsystems.html """ class Lturtle: import mathutils from mathutils import Vector, Euler Xvec = Vector((1, 0, 0)) Yvec = Vector((0, 1, 0)) Zvec = Vector((0, 0, 1)) # looking down on YX axis. Z is vertical. stackstate = [] # remembers saved state delta = 0.2 # angle of rotation length = 0.5 # full length of turtle move thickness = 0.02 # default thickness of cylinder instrudict = { '+': 'turnleft', '-': 'turnright', '&': 'pitchdown', '^': 'pitchup', '<': 'leftroll', '>': 'rightroll', '[': 'storeloc_rot', ']': 'restoreloc_rot', '%': 'roll180', '$': 'rollhoriz', 'x': 'randturn', 't': 'gravity', 'F': 'fdraw', 'f': 'fnodraw', 'Z': 'halfdraw', 'z': 'halfnodraw', 'g': 'Fnorecord', '.': 'Nomove' } stored_states = [] verts = [] edges = [] def __init__(self, vPos=Vector((0, 0, 0))): self.vHeading = Vector((0, 0, 1)) self.vPos = vPos self.delta = 0.2 self.amp = 1.0 def chomp(self, instructions): getparam = 0 checkparam = 0 param = "" for item in instructions: if getparam: if item == ")": getparam = 0 # done getting command = command + "(" + param + ")" eval(command) continue else: param = param + item # building parameter continue if checkparam: # checking for parameter? checkparam = 0 if item == "(": param = "" getparam = 1 # parameter exists continue else: command = command + "()" # no parameter eval(command) # initializing command string command = "self." + self.instrudict.get(item, 'notHandled') checkparam = 1 # set flag else: # dealing with last item if checkparam: command = command + "()" # no parameter eval(command) def add_edge(self): i = len(self.verts) self.edges.append([i - 2, i - 1]) def add_verts(self, amp=1): self.verts.append(self.vPos[:]) self.vPos = self.vPos + (self.vHeading * self.length * amp) self.verts.append(self.vPos[:]) def fnodraw(self, n=""): self.vPos = self.vPos + self.vHeading * self.length print("Forward %s (no draw)" % n) def halfnodraw(self, n=""): self.vPos = self.vPos + (self.vHeading * self.length * 0.5) print("half no draw %s" % n) def fdraw(self, n=""): self.add_verts() self.add_edge() print("fdraw %s" % n) def halfdraw(self, n=""): self.add_verts(amp=0.5) self.add_edge() print("half draw %s" % n) # Turning, Pitch, Roll def storeloc_rot(self, n=""): self.stored_states.append([self.vPos, self.vHeading]) print("Store rotation and location %s" % n) def restoreloc_rot(self, n=""): if len(self.stored_states) > 0: self.vPos, self.vHeading = self.stored_states.pop() print("Restore rotation and location %s" % n) else: print('tried restore loc/rot but stored states was empty. 
you suck :)')

    def do_rotation(self, sign, axis, n=""):
        """ rotate the heading by sign * delta degrees around the given axis: 0=x, 1=y, 2=z """
        if n:
            self.delta = float(n)
        components = [0, 0, 0]
        components[axis] = sign * radians(self.delta) * self.amp
        myEul = Euler(components, 'XYZ')
        self.vHeading.rotate(myEul)

    def turnleft(self, n=""):
        self.do_rotation(1, 2, n)
        print("Turn Left around Z axis %s" % n)

    def turnright(self, n=""):
        self.do_rotation(-1, 2, n)
        print("Turn Right around Z axis %s" % n)

    def pitchdown(self, n=""):
        self.do_rotation(1, 1, n)
        print("Pitch down %s" % n)

    def pitchup(self, n=""):
        self.do_rotation(-1, 1, n)
        print("Pitch up %s" % n)

    def leftroll(self, n=""):
        self.do_rotation(1, 0, n)
        print("left roll %s" % n)

    def rightroll(self, n=""):
        self.do_rotation(-1, 0, n)
        print("right roll %s" % n)

    def turn180(self, n=""):
        self.do_rotation(-1, 2, 180)
        print("turn180 %s" % n)

    def roll180(self, n=""):
        self.do_rotation(1, 0, 180)
        print("roll180 %s" % n)

    def rollhoriz(self, n=""):
        # not exactly sure what this command was intended to do but how
        # about resetting to vertical.
        self.vHeading = Vector((0, 0, 1))
        print("roll horiz %s" % n)

    def randturn(self, n=""):
        ax_x = radians(randint(0, 360))
        ax_y = radians(randint(0, 360))
        ax_z = radians(randint(0, 360))
        myEul = Euler((ax_x, ax_y, ax_z), 'XYZ')
        self.vHeading.rotate(myEul)
        print("randturn %s" % n)

    def gravity(self, n=""):
        print("not handled yet")
        print("gravity %s" % n)

    def Fnorecord(self, n=""):
        print("Fnorecord %s" % n)

    def Nomove(self, n=""):
        print("No move %s" % n)

    def notHandled(self, n=""):
        print("Not handled %s" % n)


def sv_main(t_angle=0.2):
    verts_out = []
    edges_out = []

    in_sockets = [
        ['s', 't_angle', t_angle]
    ]

    def produce(axiom, rules):
        output = ""
        for i in axiom:
            output = output + rules.get(i, i)
        return output

    def iterate(n, axiom, rules):
        if n > 0:
            axiom = produce(axiom, rules)
            return iterate(n - 1, axiom, rules)
        return axiom

    texts = bpy.data.texts
    f = texts['RULES'].as_string()
    rules = {}
    rules = ast.literal_eval(f)

    axiom = 'I'
    m = iterate(5, axiom, rules)

    ffff = 'poonjab' in globals()
    poonjab = Lturtle()
    poonjab.verts = []
    poonjab.edges = []
    poonjab.amp = t_angle
    poonjab.chomp(m)
    verts_out.extend(poonjab.verts)
    edges_out.extend(poonjab.edges)

    out_sockets = [
        ['v', 'verts', [verts_out]],
        ['s', 'edges', [edges_out]]
    ]

    return in_sockets, out_sockets
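# A small sketch of how the pieces above fit together, assuming it runs inside
# Blender (so mathutils is importable). The rule set is invented for illustration
# and has the same shape as the dict literal read from the 'RULES' text block.
def expand(axiom, rules, n):
    # apply the production rules n times, like produce()/iterate() in sv_main
    for _ in range(n):
        axiom = "".join(rules.get(symbol, symbol) for symbol in axiom)
    return axiom

demo_rules = {'I': 'F[+I][-I]'}
turtle = Lturtle()
turtle.verts, turtle.edges = [], []
turtle.amp = 1.0
turtle.chomp(expand('I', demo_rules, 3))   # fills turtle.verts / turtle.edges with the traced path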
gpl-3.0
-1,039,659,748,590,810,900
25.162698
83
0.502654
false
apple/swift-lldb
packages/Python/lldbsuite/test/commands/watchpoints/hello_watchlocation/TestWatchLocation.py
1
4428
""" Test lldb watchpoint that uses '-s size' to watch a pointed location with size. """ from __future__ import print_function import re import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class HelloWatchLocationTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Our simple source filename. self.source = 'main.cpp' # Find the line number to break inside main(). self.line = line_number( self.source, '// Set break point at this line.') # This is for verifying that watch location works. self.violating_func = "do_bad_thing_with_location" # Build dictionary to have unique executable names for each test # method. self.exe_name = self.testMethodName self.d = {'CXX_SOURCES': self.source, 'EXE': self.exe_name} @expectedFailureAll( oslist=["windows"], bugnumber="llvm.org/pr24446: WINDOWS XFAIL TRIAGE - Watchpoints not supported on Windows") # Most of the MIPS boards provide only one H/W watchpoints, and S/W # watchpoints are not supported yet @expectedFailureAll(triple=re.compile('^mips')) # SystemZ and PowerPC also currently supports only one H/W watchpoint @expectedFailureAll(archs=['powerpc64le', 's390x']) @expectedFailureNetBSD @skipIfDarwin def test_hello_watchlocation(self): """Test watching a location with '-s size' option.""" self.build(dictionary=self.d) self.setTearDownCleanup(dictionary=self.d) exe = self.getBuildArtifact(self.exe_name) self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) # Add a breakpoint to set a watchpoint when stopped on the breakpoint. lldbutil.run_break_set_by_file_and_line( self, None, self.line, num_expected_locations=1, loc_exact=False) # Run the program. self.runCmd("run", RUN_SUCCEEDED) # We should be stopped again due to the breakpoint. # The stop reason of the thread should be breakpoint. self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT, substrs=['stopped', 'stop reason = breakpoint']) # Now let's set a write-type watchpoint pointed to by 'g_char_ptr'. self.expect( "watchpoint set expression -w write -s 1 -- g_char_ptr", WATCHPOINT_CREATED, substrs=[ 'Watchpoint created', 'size = 1', 'type = w']) # Get a hold of the watchpoint id just created, it is used later on to # match the watchpoint id which is expected to be fired. match = re.match( "Watchpoint created: Watchpoint (.*):", self.res.GetOutput().splitlines()[0]) if match: expected_wp_id = int(match.group(1), 0) else: self.fail("Grokking watchpoint id faailed!") self.runCmd("expr unsigned val = *g_char_ptr; val") self.expect(self.res.GetOutput().splitlines()[0], exe=False, endstr=' = 0') self.runCmd("watchpoint set expression -w write -s 4 -- &threads[0]") # Use the '-v' option to do verbose listing of the watchpoint. # The hit count should be 0 initially. self.expect("watchpoint list -v", substrs=['hit_count = 0']) self.runCmd("process continue") # We should be stopped again due to the watchpoint (write type), but # only once. The stop reason of the thread should be watchpoint. self.expect("thread list", STOPPED_DUE_TO_WATCHPOINT, substrs=['stopped', 'stop reason = watchpoint %d' % expected_wp_id]) # Switch to the thread stopped due to watchpoint and issue some # commands. self.switch_to_thread_with_stop_reason(lldb.eStopReasonWatchpoint) self.runCmd("thread backtrace") self.expect("frame info", substrs=[self.violating_func]) # Use the '-v' option to do verbose listing of the watchpoint. # The hit count should now be 1. 
self.expect("watchpoint list -v", substrs=['hit_count = 1']) self.runCmd("thread backtrace all")
apache-2.0
-3,877,249,198,937,931,000
38.185841
98
0.610659
false
hoburg/gpkit
docs/source/examples/boundschecking.py
1
1592
"Verifies that bounds are caught through monomials" from gpkit import Model, parse_variables from gpkit.exceptions import UnboundedGP, UnknownInfeasible class BoundsChecking(Model): """Implements a crazy set of unbounded variables. Variables --------- Ap [-] d D [-] e F [-] s mi [-] c mf [-] r T [-] i nu [-] p Fs 0.9 [-] t mb 0.4 [-] i rf 0.01 [-] o V 300 [-] n Upper Unbounded --------------- F Lower Unbounded --------------- D """ @parse_variables(__doc__, globals()) def setup(self): self.cost = F return [ F >= D + T, D == rf*V**2*Ap, Ap == nu, T == mf*V, mf >= mi + mb, mf == rf*V, Fs <= mi ] m = BoundsChecking() print(m.str_without(["lineage"])) try: m.solve() except UnboundedGP: gp = m.gp(checkbounds=False) missingbounds = gp.check_bounds() try: sol = gp.solve(verbosity=0) # Errors on mosek_cli except UnknownInfeasible: # pragma: no cover pass bpl = ", but would gain it from any of these sets: " assert missingbounds[(m.D.key, 'lower')] == bpl + "[(%s, 'lower')]" % m.Ap assert missingbounds[(m.nu.key, 'lower')] == bpl + "[(%s, 'lower')]" % m.Ap # ordering is arbitrary: assert missingbounds[(m.Ap.key, 'lower')] in ( bpl + ("[(%s, 'lower')] or [(%s, 'lower')]" % (m.D, m.nu)), bpl + ("[(%s, 'lower')] or [(%s, 'lower')]" % (m.nu, m.D)))
mit
1,190,010,389,241,384,700
23.492308
75
0.476131
false
daniel-noland/MemoryOracle
gdbwatch/gdbtest/mem/DynamicBreak.py
1
3858
#!/usr/bin/env python # -*- encoding UTF-8 -*- # THIS CODE DERIVED FORM cma.py import gdb import signal import re import threading from .Heap import Heap #----------------------------------------------------------------------- #Archs # TODO: Update all arch classes to use gdb.Architecture checks instead of this # hack class Arch(object): class x86_32(object): @staticmethod def is_current(): if gdb.execute("info reg", True, True).find("eax") >= 0: return True return False @staticmethod def get_arg(num): if num > 1: raise Exception("get_arg %d is not supported." %num) gdb.execute("up", False, True) ret = long(gdb.parse_and_eval( "*(unsigned int *)($esp + " + str(num * 4) + ")") ) gdb.execute("down", False, True) return ret @staticmethod def get_ret(): return long(gdb.parse_and_eval("$eax")) class x86_64(object): @staticmethod def is_current(): return gdb.newest_frame().architecture().name() == "i386:x86-64" @staticmethod def get_arg(num): if num == 0: return long(gdb.newest_frame().read_register("rdi")) elif num == 1: return long(gdb.newest_frame().read_register("rsi")) else: raise Exception("get_arg %d is not supported." %num) @staticmethod def get_ret(self): return long(gdb.newest_frame().read_register("rax")) class arm(object): @staticmethod def is_current(): if gdb.execute("info reg", True, True).find("cpsr") >= 0: return True return False @staticmethod def get_arg(num): if num == 0: return long(gdb.parse_and_eval("$r0")) elif num == 1: return long(gdb.parse_and_eval("$r1")) else: raise Exception("get_arg %d is not supported." %num) @staticmethod def get_ret(): return long(gdb.parse_and_eval("$r0")) archs = (Arch.x86_32, Arch.x86_64, Arch.arm) current = None for e in Arch.archs: if e.is_current(): Arch.current = e break else: raise Exception("Current architecture is not supported by MemoryOracle.") arch = Arch.current class BreakException(Exception): pass class DynamicBreak(gdb.Breakpoint): @staticmethod def _heap_track(ret, size): print("_tracked ", ret, size) gdb.execute("echo " + str(size) ) not_released_add(ret, size) @staticmethod def _heap_release(): print("_released ", arch.get_arg(0)) released_add(arch.get_arg(0)) class DynamicBreakAlloc(DynamicBreak): allocs = dict() def stop(self): size = arch.get_arg(0) fin = DynamicBreakAllocFinish() return False class DynamicBreakAllocFinish(gdb.FinishBreakpoint): def stop(self): print("finish return " + str(hex(arch.get_ret()))) return False class DynamicBreakCalloc(DynamicBreak): def event(self): size = arch.get_arg(0) * arch.get_arg(1) DynamicBreak._disable_finish_enable() self._heap_track(arch.get_ret(), size) class DynamicBreakRealloc(DynamicBreak): def event(self): super()._heap_release() size = arch.get_arg(1) DynamicBreak._disable_finish_enable() super()._heap_track(arch.get_ret(), size) class DynamicBreakRelease(DynamicBreak): def event(self): super()._heap_release() DynamicBreak._disable_finish_enable() b = DynamicBreakAlloc("operator new", gdb.BP_BREAKPOINT, gdb.WP_READ, True) print("hello")
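# A hedged sketch of the same pattern using only documented gdb Python API calls
# plus the Arch helpers above: a breakpoint that logs allocation sizes without
# stopping the inferior. The function name "malloc" is illustrative.
class MallocLogger(gdb.Breakpoint):
    def stop(self):
        size = arch.get_arg(0)               # first call argument, per Arch.current
        print("malloc(%d) requested" % size)
        return False                         # never halt execution

# malloc_logger = MallocLogger("malloc", gdb.BP_BREAKPOINT)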
lgpl-3.0
-5,980,768,733,821,877,000
24.215686
81
0.550544
false
sha-red/django-shared-utils
shared/utils/fields.py
1
1882
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re

from .text import slugify

# TODO Remove deprecated location
from .models.slugs import AutoSlugField


def uniquify_field_value(instance, field_name, value, max_length=None, queryset=None):
    """
    Makes a char field value unique by appending an index, taking care of the
    field's max length.

    FIXME Doesn't work with model inheritance, where the field is part of the parent class.
    """
    def get_similar_values(value):
        return queryset.exclude(pk=instance.pk) \
            .filter(**{"%s__istartswith" % field_name: value}).values_list(field_name, flat=True)

    if not value:
        raise ValueError("Cannot uniquify empty value")
        # TODO Instead get value from instance.field, or use a default value?

    if not max_length:
        max_length = instance._meta.get_field(field_name).max_length
    if not queryset:
        queryset = instance._meta.default_manager.get_queryset()

    # Find already existing counter
    m = re.match(r'(.+)(-\d+)$', value)
    if m:
        base_value, counter = m.groups()
        index = int(counter.strip("-")) + 1
    else:
        base_value = value
        index = 2  # Begin appending "-2"

    similar_values = get_similar_values(value)
    while value in similar_values or len(value) > max_length:
        value = "%s-%i" % (base_value, index)
        if len(value) > max_length:
            base_value = base_value[:-(len(value) - max_length)]
            value = "%s-%i" % (base_value, index)
            similar_values = get_similar_values(base_value)
        index += 1
    return value


# TODO Remove alias
def unique_slug(instance, slug_field, slug_value, max_length=50, queryset=None):
    slug_value = slugify(slug_value)
    # Pass the caller's max_length and queryset through to the generic helper.
    return uniquify_field_value(instance, slug_field, slug_value,
                                max_length=max_length, queryset=queryset)
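# A hedged usage sketch; the Article model below is illustrative only and not
# part of this module. unique_slug() derives a slug from the title and appends
# "-2", "-3", ... until the value is unique within the model's queryset.
from django.db import models


class Article(models.Model):
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=50, unique=True)

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = unique_slug(self, "slug", self.title)
        super(Article, self).save(*args, **kwargs)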
mit
-4,836,092,466,750,654,000
33.218182
97
0.644527
false
HubbleStack/Hubble
hubblestack/extmods/returners/splunk_pulsar_return.py
1
13369
# -*- encoding: utf-8 -*- ''' HubbleStack Pulsar-to-Splunk returner Deliver HubbleStack Pulsar event data into Splunk using the HTTP event collector. Required config/pillar settings: .. code-block:: yaml hubblestack: returner: splunk: - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX indexer: splunk-indexer.domain.tld index: hubble sourcetype_pulsar: hubble_fim You can also add a `custom_fields` argument which is a list of keys to add to events with using the results of config.get(<custom_field>). These new keys will be prefixed with 'custom_' to prevent conflicts. The values of these keys should be strings or lists (will be sent as CSV string), do not choose grains or pillar values with complex values or they will be skipped. Additionally, you can define a fallback_indexer which will be used if a default gateway is not defined. .. code-block:: yaml hubblestack: returner: splunk: - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX indexer: splunk-indexer.domain.tld index: hubble sourcetype_pulsar: hubble_fim fallback_indexer: splunk-indexer.loc.domain.tld custom_fields: - site - product_group ''' import socket # Imports for http event forwarder import requests import json import os import time from collections import defaultdict from hubblestack.hec import http_event_collector, get_splunk_options, make_hec_args import logging RETRY = False log = logging.getLogger(__name__) def returner(ret): try: if isinstance(ret, dict) and not ret.get('return'): # Empty single return, let's not do any setup or anything return opts_list = get_splunk_options( sourcetype_pulsar='hubble_fim', _nick={'sourcetype_pulsar': 'sourcetype'}) for opts in opts_list: logging.debug('Options: %s' % json.dumps(opts)) custom_fields = opts['custom_fields'] # Set up the fields to be extracted at index time. The field values must be strings. # Note that these fields will also still be available in the event data index_extracted_fields = [] try: index_extracted_fields.extend(__opts__.get('splunk_index_extracted_fields', [])) except TypeError: pass # Set up the collector args, kwargs = make_hec_args(opts) hec = http_event_collector(*args, **kwargs) # Check whether or not data is batched: if isinstance(ret, dict): # Batching is disabled data = [ret] else: data = ret # Sometimes there are duplicate events in the list. Dedup them: data = _dedupList(data) minion_id = __opts__['id'] jid = ret['jid'] global RETRY RETRY = ret['retry'] fqdn = __grains__['fqdn'] # Sometimes fqdn is blank. If it is, replace it with minion_id fqdn = fqdn if fqdn else minion_id master = __grains__['master'] try: fqdn_ip4 = __grains__.get('local_ip4') if not fqdn_ip4: fqdn_ip4 = __grains__['fqdn_ip4'][0] except IndexError: try: fqdn_ip4 = __grains__['ipv4'][0] except IndexError: raise Exception('No ipv4 grains found. Is net-tools installed?') if fqdn_ip4.startswith('127.'): for ip4_addr in __grains__['ipv4']: if ip4_addr and not ip4_addr.startswith('127.'): fqdn_ip4 = ip4_addr break local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn']) # Sometimes fqdn reports a value of localhost. If that happens, try another method. bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6'] if fqdn in bad_fqdns: new_fqdn = socket.gethostname() if '.' 
not in new_fqdn or new_fqdn in bad_fqdns: new_fqdn = fqdn_ip4 fqdn = new_fqdn # Get cloud details cloud_details = __grains__.get('cloud_details', {}) alerts = [] for item in data: events = item['return'] if not isinstance(events, list): events = [events] alerts.extend(events) for alert in alerts: event = {} payload = {} if('change' in alert): # Linux, normal pulsar # The second half of the change will be '|IN_ISDIR' for directories change = alert['change'].split('|')[0] # Skip the IN_IGNORED events if change == 'IN_IGNORED': continue if len(alert['change'].split('|')) == 2: object_type = 'directory' else: object_type = 'file' actions = defaultdict(lambda: 'unknown') actions['IN_ACCESS'] = 'read' actions['IN_ATTRIB'] = 'acl_modified' actions['IN_CLOSE_NOWRITE'] = 'read' actions['IN_CLOSE_WRITE'] = 'read' actions['IN_CREATE'] = 'created' actions['IN_DELETE'] = 'deleted' actions['IN_DELETE_SELF'] = 'deleted' actions['IN_MODIFY'] = 'modified' actions['IN_MOVE_SELF'] = 'modified' actions['IN_MOVED_FROM'] = 'modified' actions['IN_MOVED_TO'] = 'modified' actions['IN_OPEN'] = 'read' actions['IN_MOVE'] = 'modified' actions['IN_CLOSE'] = 'read' event['action'] = actions[change] event['change_type'] = 'filesystem' event['object_category'] = object_type event['object_path'] = alert['path'] event['file_name'] = alert['name'] event['file_path'] = alert['tag'] event['pulsar_config'] = alert['pulsar_config'] if 'contents' in alert: event['contents'] = alert['contents'] if alert['stats']: # Gather more data if the change wasn't a delete stats = alert['stats'] event['object_id'] = stats['inode'] event['file_acl'] = stats['mode'] event['file_create_time'] = stats['ctime'] event['file_modify_time'] = stats['mtime'] event['file_size'] = stats['size'] / 1024.0 # Convert bytes to kilobytes event['user'] = stats['user'] event['group'] = stats['group'] if object_type == 'file': chk = alert.get('checksum') if chk: event['file_hash'] = chk event['file_hash_type'] = alert.get('checksum_type', 'unknown') else: # Windows, win_pulsar if alert.get('Accesses', None): change = alert['Accesses'] if alert['Hash'] == 'Item is a directory': object_type = 'directory' else: object_type = 'file' else: change = alert['Reason'] object_type = 'file' actions = defaultdict(lambda: 'unknown') actions['Delete'] = 'deleted' actions['Read Control'] = 'read' actions['Write DAC'] = 'acl_modified' actions['Write Owner'] = 'modified' actions['Synchronize'] = 'modified' actions['Access Sys Sec'] = 'read' actions['Read Data'] = 'read' actions['Write Data'] = 'modified' actions['Append Data'] = 'modified' actions['Read EA'] = 'read' actions['Write EA'] = 'modified' actions['Execute/Traverse'] = 'read' actions['Read Attributes'] = 'read' actions['Write Attributes'] = 'acl_modified' actions['Query Key Value'] = 'read' actions['Set Key Value'] = 'modified' actions['Create Sub Key'] = 'created' actions['Enumerate Sub-Keys'] = 'read' actions['Notify About Changes to Keys'] = 'read' actions['Create Link'] = 'created' actions['Print'] = 'read' actions['Basic info change'] = 'modified' actions['Compression change'] = 'modified' actions['Data extend'] = 'modified' actions['EA change'] = 'modified' actions['File create'] = 'created' actions['File delete'] = 'deleted' if alert.get('Accesses', None): event['action'] = actions[change] event['change_type'] = 'filesystem' event['object_category'] = object_type event['object_path'] = alert['Object Name'] event['file_name'] = os.path.basename(alert['Object Name']) event['file_path'] = 
os.path.dirname(alert['Object Name']) event['pulsar_config'] = alert['pulsar_config'] # TODO: Should we be reporting 'EntryType' or 'TimeGenerated? # EntryType reports whether attempt to change was successful. else: for c in change: if not event.get('action', None): event['action'] = actions.get(c, c) else: event['action'] += ', ' + actions.get(c, c) event['change_type'] = 'filesystem' event['object_category'] = object_type event['object_path'] = alert['Full path'] event['file_name'] = alert['File name'] event['file_path'] = alert['tag'] event['pulsar_config'] = alert.get('pulsar_config', 'hubblestack_pulsar_win_config.yaml') event['TimeGenerated'] = alert['Time stamp'] chk = alert.get('checksum') if chk: event['file_hash'] = chk event['file_hash_type'] = alert.get('checksum_type', 'unknown') event.update({'master': master}) event.update({'minion_id': minion_id}) event.update({'dest_host': fqdn}) event.update({'dest_ip': fqdn_ip4}) event.update({'dest_fqdn': local_fqdn}) event.update({'system_uuid': __grains__.get('system_uuid')}) event.update(cloud_details) for custom_field in custom_fields: custom_field_name = 'custom_' + custom_field custom_field_value = __salt__['config.get'](custom_field, '') if isinstance(custom_field_value, (str, unicode)): event.update({custom_field_name: custom_field_value}) elif isinstance(custom_field_value, list): custom_field_value = ','.join(custom_field_value) event.update({custom_field_name: custom_field_value}) payload.update({'host': fqdn}) payload.update({'index': opts['index']}) payload.update({'sourcetype': opts['sourcetype']}) # Remove any empty fields from the event payload remove_keys = [k for k in event if event[k] == ""] for k in remove_keys: del event[k] payload.update({'event': event}) # Potentially add metadata fields: fields = {} for item in index_extracted_fields: if item in payload['event'] and not isinstance(payload['event'][item], (list, dict, tuple)): fields[item] = str(payload['event'][item]) if fields: payload.update({'fields': fields}) hec.batchEvent(payload) hec.flushBatch() except Exception: log.exception('Error ocurred in splunk_pulsar_return') return def _dedupList(l): deduped = [] for i, x in enumerate(l): if x not in l[i + 1:]: deduped.append(x) return deduped
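# A hedged sketch of the minimal single-return payload shape that returner()
# above understands for a Linux inotify event; every value here is invented.
sample_ret = {
    "jid": "20190101010101010101",
    "retry": False,
    "return": [{
        "change": "IN_MODIFY",
        "path": "/etc/passwd",
        "name": "passwd",
        "tag": "/etc/passwd",
        "pulsar_config": "hubblestack_pulsar_config.yaml",
        "checksum": "d41d8cd9",
        "checksum_type": "sha256",
        "stats": {"inode": 1234, "mode": "0644", "ctime": 0, "mtime": 0,
                  "size": 2048, "user": "root", "group": "root"},
    }],
}
# returner(sample_ret)  # would emit one hubble_fim event per entry in "return"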
apache-2.0
-5,617,588,501,900,962,000
42.689542
112
0.480814
false
gnarayan/WDmodel
WDmodel/main.py
1
11665
# -*- coding: UTF-8 -*-
"""
The WDmodel package is designed to infer the SED of DA white dwarfs given
spectra and photometry. This main module wraps all the other modules, and their
classes and methods to implement the algorithm.
"""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import mpi4py
import numpy as np
from . import io
from . import WDmodel
from . import passband
from . import covariance
from . import fit
from . import viz

sys_excepthook = sys.excepthook


def mpi_excepthook(excepttype, exceptvalue, traceback):
    """
    Overload :py:func:`sys.excepthook` when using :py:class:`mpi4py.MPI` to
    terminate all MPI processes when an Exception is raised.
    """
    sys_excepthook(excepttype, exceptvalue, traceback)
    mpi4py.MPI.COMM_WORLD.Abort(1)


def main(inargs=None):
    """
    Entry point for the :py:mod:`WDmodel` fitter package.

    Parameters
    ----------
    inargs : list, optional
        Input arguments to configure the fit. If not specified
        :py:data:`sys.argv` is used. inargs must be parseable by
        :py:func:`WDmodel.io.get_options`.

    Raises
    ------
    RuntimeError
        If user attempts to resume the fit without having run it first

    Notes
    -----
        The package is structured into several modules and classes

        ================================================= ===================
                    Module                                  Model Component
        ================================================= ===================
        :py:mod:`WDmodel.io`                              I/O methods
        :py:class:`WDmodel.WDmodel.WDmodel`               SED generator
        :py:mod:`WDmodel.passband`                        Throughput model
        :py:class:`WDmodel.covariance.WDmodel_CovModel`   Noise model
        :py:class:`WDmodel.likelihood.WDmodel_Likelihood` Likelihood function
        :py:class:`WDmodel.likelihood.WDmodel_Posterior`  Posterior function
        :py:mod:`WDmodel.fit`                             "Fitting" methods
        :py:mod:`WDmodel.viz`                             Viz methods
        ================================================= ===================

        This method implements our algorithm to infer the DA White Dwarf
        properties and construct the SED model given the data using the
        methods and classes listed above. Once the data is read, the model is
        configured, and the likelihood and posterior functions constructed,
        the fitter methods evaluate the model parameters given the data, using
        the samplers in :py:mod:`emcee`.

        :py:mod:`WDmodel.mossampler` provides an overloaded
        :py:class:`emcee.PTSampler` with a more reliable auto-correlation
        estimate. Finally, the result is output along with various plots.
""" comm = mpi4py.MPI.COMM_WORLD size = comm.Get_size() if size > 1: # force all MPI processes to terminate if we are running with --mpi and an exception is raised sys.excepthook = mpi_excepthook if inargs is None: inargs = sys.argv[1:] # parse the arguments args, pool= io.get_options(inargs, comm) specfile = args.specfile spectable = args.spectable lamshift = args.lamshift vel = args.vel bluelim, redlim = args.trimspec rebin = args.rebin rescale = args.rescale blotch = args.blotch outdir = args.outdir outroot = args.outroot photfile = args.photfile rvmodel = args.reddeningmodel phot_dispersion = args.phot_dispersion pbfile = args.pbfile excludepb = args.excludepb ignorephot= args.ignorephot covtype = args.covtype coveps = args.coveps samptype = args.samptype ascale = args.ascale ntemps = args.ntemps nwalkers = args.nwalkers nburnin = args.nburnin nprod = args.nprod everyn = args.everyn thin = args.thin redo = args.redo resume = args.resume discard = args.discard balmer = args.balmerlines ndraws = args.ndraws savefig = args.savefig ##### SETUP ##### # set the object name and create output directories objname, outdir = io.set_objname_outdir_for_specfile(specfile, outdir=outdir, outroot=outroot,\ redo=redo, resume=resume) message = "Writing to outdir {}".format(outdir) print(message) # init the model model = WDmodel.WDmodel(rvmodel=rvmodel) if not resume: # parse the parameter keywords in the argparse Namespace into a dictionary params = io.get_params_from_argparse(args) # get resolution - by default, this is None, since it depends on instrument settings for each spectra # we can look it up from a lookup table provided by Tom Matheson for our spectra # a custom argument from the command line overrides the lookup fwhm = params['fwhm']['value'] fwhm, lamshift = io.get_spectrum_resolution(specfile, spectable, fwhm=fwhm, lamshift=lamshift) params['fwhm']['value'] = fwhm # read spectrum spec = io.read_spec(specfile) # pre-process spectrum out = fit.pre_process_spectrum(spec, bluelim, redlim, model, params,\ rebin=rebin, lamshift=lamshift, vel=vel, blotch=blotch, rescale=rescale) spec, cont_model, linedata, continuumdata, scale_factor, params = out # get photometry if not ignorephot: phot = io.get_phot_for_obj(objname, photfile) else: params['mu']['value'] = 0. params['mu']['fixed'] = True phot = None # exclude passbands that we want excluded pbnames = [] if phot is not None: pbnames = np.unique(phot.pb) if excludepb is not None: pbnames = list(set(pbnames) - set(excludepb)) # filter the photometry recarray to use only the passbands we want useind = [x for x, pb in enumerate(phot.pb) if pb in pbnames] useind = np.array(useind) phot = phot.take(useind) # set the pbnames from the trimmed photometry recarray to preserve order pbnames = list(phot.pb) # if we cut out out all the passbands, force mu to be fixed if len(pbnames) == 0: params['mu']['value'] = 0. 
params['mu']['fixed'] = True phot = None # save the inputs to the fitter outfile = io.get_outfile(outdir, specfile, '_inputs.hdf5', check=True, redo=redo, resume=resume) io.write_fit_inputs(spec, phot, cont_model, linedata, continuumdata,\ rvmodel, covtype, coveps, phot_dispersion, scale_factor, outfile) else: outfile = io.get_outfile(outdir, specfile, '_inputs.hdf5', check=False, redo=redo, resume=resume) try: spec, cont_model, linedata, continuumdata, phot, fit_config = io.read_fit_inputs(outfile) except IOError as e: message = '{}\nMust run fit to generate inputs before attempting to resume'.format(e) raise RuntimeError(message) rvmodel = fit_config['rvmodel'] covtype = fit_config['covtype'] coveps = fit_config['coveps'] scale_factor = fit_config['scale_factor'] phot_dispersion = fit_config['phot_dispersion'] if phot is not None: pbnames = list(phot.pb) else: pbnames = [] # get the throughput model pbs = passband.get_pbmodel(pbnames, model, pbfile=pbfile) ##### MINUIT ##### outfile = io.get_outfile(outdir, specfile, '_params.json', check=True, redo=redo, resume=resume) if not resume: # to avoid minuit messing up inputs, it can be skipped entirely to force the MCMC to start at a specific position if not args.skipminuit: # do a quick fit to refine the input params migrad_params = fit.quick_fit_spec_model(spec, model, params) # save the minuit fit result - this will not be perfect, but if it's bad, refine starting position viz.plot_minuit_spectrum_fit(spec, objname, outdir, specfile, scale_factor,\ model, migrad_params, save=True) else: # we didn't run minuit, so we'll assume the user intended to start us at some specific position migrad_params = io.copy_params(params) if covtype == 'White': migrad_params['fsig']['value'] = 0. migrad_params['fsig']['fixed'] = True migrad_params['tau']['fixed'] = True # If we don't have a user supplied initial guess of mu, get a guess migrad_params = fit.hyper_param_guess(spec, phot, model, pbs, migrad_params) # write out the migrad params - note that if you skipminuit, you are expected to provide the dl value # if skipmcmc is set, you can now run the code with MPI io.write_params(migrad_params, outfile) else: try: migrad_params = io.read_params(outfile) except (OSError,IOError) as e: message = '{}\nMust run fit to generate inputs before attempting to resume'.format(e) raise RuntimeError(message) # init a covariance model instance that's used to model the residuals # between the systematic residuals between data and model errscale = np.median(spec.flux_err) covmodel = covariance.WDmodel_CovModel(errscale, covtype, coveps) ##### MCMC ##### # skipmcmc can be run to just prepare the inputs if not args.skipmcmc: # do the fit result = fit.fit_model(spec, phot, model, covmodel, pbs, migrad_params,\ objname, outdir, specfile,\ phot_dispersion=phot_dispersion,\ samptype=samptype, ascale=ascale,\ ntemps=ntemps, nwalkers=nwalkers, nburnin=nburnin, nprod=nprod,\ thin=thin, everyn=everyn,\ redo=redo, resume=resume,\ pool=pool) param_names, samples, samples_lnprob, everyn, shape = result ntemps, nwalkers, nprod, nparam = shape mcmc_params = io.copy_params(migrad_params) # parse the samples in the chain and get the result result = fit.get_fit_params_from_samples(param_names, samples, samples_lnprob, mcmc_params,\ ntemps=ntemps, nwalkers=nwalkers, nprod=nprod, discard=discard) mcmc_params, in_samp, in_lnprob = result # write the result to a file outfile = io.get_outfile(outdir, specfile, '_result.json') io.write_params(mcmc_params, outfile) # plot the MCMC output plot_out 
= viz.plot_mcmc_model(spec, phot, linedata,\ scale_factor, phot_dispersion,\ objname, outdir, specfile,\ model, covmodel, cont_model, pbs,\ mcmc_params, param_names, in_samp, in_lnprob,\ covtype=covtype, balmer=balmer,\ ndraws=ndraws, everyn=everyn, savefig=savefig) model_spec, full_mod, model_mags = plot_out spec_model_file = io.get_outfile(outdir, specfile, '_spec_model.dat') io.write_spectrum_model(spec, model_spec, spec_model_file) full_model_file = io.get_outfile(outdir, specfile, '_full_model.hdf5') io.write_full_model(full_mod, full_model_file) if phot is not None: phot_model_file = io.get_outfile(outdir, specfile, '_phot_model.dat') io.write_phot_model(phot, model_mags, phot_model_file) return
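# A hedged sketch of driving main() programmatically: it accepts an argv-style
# list in place of sys.argv. The option names below are illustrative assumptions;
# the authoritative set is defined in WDmodel.io.get_options().
# main(["--specfile", "wd1234p5678.flm",
#       "--photfile", "photometry.dat",
#       "--outdir", "out",
#       "--nwalkers", "100", "--nprod", "1000"])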
gpl-3.0
8,631,514,802,882,615,000
37.754153
121
0.61123
false
drfraser/django-paypal
paypal/standard/forms.py
1
10801
#!/usr/bin/env python # -*- coding: utf-8 -*- import logging from django import forms from django.conf import settings from django.utils.safestring import mark_safe from django.utils import timezone from paypal.standard.widgets import ValueHiddenInput, ReservedValueHiddenInput from paypal.standard.conf import (POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT, IMAGE, SUBSCRIPTION_IMAGE, DONATION_IMAGE, SANDBOX_IMAGE, SUBSCRIPTION_SANDBOX_IMAGE, DONATION_SANDBOX_IMAGE) log = logging.getLogger(__name__) # 20:18:05 Jan 30, 2009 PST - PST timezone support is not included out of the box. # PAYPAL_DATE_FORMAT = ("%H:%M:%S %b. %d, %Y PST", "%H:%M:%S %b %d, %Y PST",) # PayPal dates have been spotted in the wild with these formats, beware! PAYPAL_DATE_FORMATS = ["%H:%M:%S %b. %d, %Y PST", "%H:%M:%S %b. %d, %Y PDT", "%H:%M:%S %b %d, %Y PST", "%H:%M:%S %b %d, %Y PDT", "%H:%M:%S %d %b %Y PST", # IPN Tester "%H:%M:%S %d %b %Y PDT", # formats ] class PayPalDateTimeField(forms.DateTimeField): input_formats = PAYPAL_DATE_FORMATS def strptime(self, value, format): dt = super(PayPalDateTimeField, self).strptime(value, format) parts = format.split(" ") if timezone.pytz and settings.USE_TZ: if parts[-1] in ["PDT", "PST"]: # PST/PDT is 'US/Pacific' dt = timezone.make_aware(dt, timezone.pytz.timezone('US/Pacific')) return dt class PayPalPaymentsForm(forms.Form): """ Creates a PayPal Payments Standard "Buy It Now" button, configured for a selling a single item with no shipping. For a full overview of all the fields you can set (there is a lot!) see: http://tinyurl.com/pps-integration Usage: >>> f = PayPalPaymentsForm(initial={'item_name':'Widget 001', ...}) >>> f.render() u'<form action="https://www.paypal.com/cgi-bin/webscr" method="post"> ...' """ CMD_CHOICES = ( ("_xclick", "Buy now or Donations"), ("_donations", "Donations"), ("_cart", "Shopping cart"), ("_xclick-subscriptions", "Subscribe") ) SHIPPING_CHOICES = ((1, "No shipping"), (0, "Shipping")) NO_NOTE_CHOICES = ((1, "No Note"), (0, "Include Note")) RECURRING_PAYMENT_CHOICES = ( (1, "Subscription Payments Recur"), (0, "Subscription payments do not recur") ) REATTEMPT_ON_FAIL_CHOICES = ( (1, "reattempt billing on Failure"), (0, "Do Not reattempt on failure") ) BUY = 'buy' SUBSCRIBE = 'subscribe' DONATE = 'donate' # Where the money goes. business = forms.CharField(widget=ValueHiddenInput(), initial=settings.PAYPAL_RECEIVER_EMAIL) # Item information. amount = forms.IntegerField(widget=ValueHiddenInput()) item_name = forms.CharField(widget=ValueHiddenInput()) item_number = forms.CharField(widget=ValueHiddenInput()) quantity = forms.CharField(widget=ValueHiddenInput()) # Subscription Related. a1 = forms.CharField(widget=ValueHiddenInput()) # Trial 1 Price p1 = forms.CharField(widget=ValueHiddenInput()) # Trial 1 Duration t1 = forms.CharField(widget=ValueHiddenInput()) # Trial 1 unit of Duration, default to Month a2 = forms.CharField(widget=ValueHiddenInput()) # Trial 2 Price p2 = forms.CharField(widget=ValueHiddenInput()) # Trial 2 Duration t2 = forms.CharField(widget=ValueHiddenInput()) # Trial 2 unit of Duration, default to Month a3 = forms.CharField(widget=ValueHiddenInput()) # Subscription Price p3 = forms.CharField(widget=ValueHiddenInput()) # Subscription Duration t3 = forms.CharField(widget=ValueHiddenInput()) # Subscription unit of Duration, default to Month src = forms.CharField(widget=ValueHiddenInput()) # Is billing recurring? 
default to yes sra = forms.CharField(widget=ValueHiddenInput()) # Reattempt billing on failed cc transaction no_note = forms.CharField(widget=ValueHiddenInput()) # Can be either 1 or 2. 1 = modify or allow new subscription creation, 2 = modify only modify = forms.IntegerField(widget=ValueHiddenInput()) # Are we modifying an existing subscription? # Localization / PayPal Setup lc = forms.CharField(widget=ValueHiddenInput()) page_style = forms.CharField(widget=ValueHiddenInput()) cbt = forms.CharField(widget=ValueHiddenInput()) # IPN control. notify_url = forms.CharField(widget=ValueHiddenInput()) cancel_return = forms.CharField(widget=ValueHiddenInput()) return_url = forms.CharField(widget=ReservedValueHiddenInput(attrs={"name": "return"})) custom = forms.CharField(widget=ValueHiddenInput()) invoice = forms.CharField(widget=ValueHiddenInput()) # Default fields. cmd = forms.ChoiceField(widget=forms.HiddenInput(), initial=CMD_CHOICES[0][0]) charset = forms.CharField(widget=forms.HiddenInput(), initial="utf-8") currency_code = forms.CharField(widget=forms.HiddenInput(), initial="USD") no_shipping = forms.ChoiceField(widget=forms.HiddenInput(), choices=SHIPPING_CHOICES, initial=SHIPPING_CHOICES[0][0]) def __init__(self, button_type="buy", *args, **kwargs): super(PayPalPaymentsForm, self).__init__(*args, **kwargs) self.button_type = button_type if 'initial' in kwargs: # Dynamically create, so we can support everything PayPal does. for k, v in kwargs['initial'].items(): if k not in self.base_fields: self.fields[k] = forms.CharField(label=k, widget=ValueHiddenInput(), initial=v) def test_mode(self): return getattr(settings, 'PAYPAL_TEST', True) def get_endpoint(self): "Returns the endpoint url for the form." if self.test_mode(): return SANDBOX_POSTBACK_ENDPOINT else: return POSTBACK_ENDPOINT def render(self): return mark_safe(u"""<form action="%s" method="post"> %s <input type="image" src="%s" border="0" name="submit" alt="Buy it Now" /> </form>""" % (self.get_endpoint(), self.as_p(), self.get_image())) def sandbox(self): "Deprecated. Use self.render() instead." import warnings warnings.warn("""PaypalPaymentsForm.sandbox() is deprecated. Use the render() method instead.""", DeprecationWarning) return self.render() def get_image(self): return { (True, self.SUBSCRIBE): SUBSCRIPTION_SANDBOX_IMAGE, (True, self.BUY): SANDBOX_IMAGE, (True, self.DONATE): DONATION_SANDBOX_IMAGE, (False, self.SUBSCRIBE): SUBSCRIPTION_IMAGE, (False, self.BUY): IMAGE, (False, self.DONATE): DONATION_IMAGE, }[self.test_mode(), self.button_type] def is_transaction(self): return not self.is_subscription() def is_donation(self): return self.button_type == self.DONATE def is_subscription(self): return self.button_type == self.SUBSCRIBE class PayPalEncryptedPaymentsForm(PayPalPaymentsForm): """ Creates a PayPal Encrypted Payments "Buy It Now" button. Requires the M2Crypto package. Based on example at: http://blog.mauveweb.co.uk/2007/10/10/paypal-with-django/ """ def _encrypt(self): """Use your key thing to encrypt things.""" from M2Crypto import BIO, SMIME, X509 # @@@ Could we move this to conf.py? CERT = settings.PAYPAL_PRIVATE_CERT PUB_CERT = settings.PAYPAL_PUBLIC_CERT PAYPAL_CERT = settings.PAYPAL_CERT CERT_ID = settings.PAYPAL_CERT_ID # Iterate through the fields and pull out the ones that have a value. 
plaintext = 'cert_id=%s\n' % CERT_ID for name, field in self.fields.items(): value = None if name in self.initial: value = self.initial[name] elif field.initial is not None: value = field.initial if value is not None: # @@@ Make this less hackish and put it in the widget. if name == "return_url": name = "return" plaintext += u'%s=%s\n' % (name, value) plaintext = plaintext.encode('utf-8') # Begin crypto weirdness. s = SMIME.SMIME() s.load_key_bio(BIO.openfile(CERT), BIO.openfile(PUB_CERT)) p7 = s.sign(BIO.MemoryBuffer(plaintext), flags=SMIME.PKCS7_BINARY) x509 = X509.load_cert_bio(BIO.openfile(PAYPAL_CERT)) sk = X509.X509_Stack() sk.push(x509) s.set_x509_stack(sk) s.set_cipher(SMIME.Cipher('des_ede3_cbc')) tmp = BIO.MemoryBuffer() p7.write_der(tmp) p7 = s.encrypt(tmp, flags=SMIME.PKCS7_BINARY) out = BIO.MemoryBuffer() p7.write(out) return out.read() def as_p(self): return mark_safe(u""" <input type="hidden" name="cmd" value="_s-xclick" /> <input type="hidden" name="encrypted" value="%s" /> """ % self._encrypt()) class PayPalSharedSecretEncryptedPaymentsForm(PayPalEncryptedPaymentsForm): """ Creates a PayPal Encrypted Payments "Buy It Now" button with a Shared Secret. Shared secrets should only be used when your IPN endpoint is on HTTPS. Adds a secret to the notify_url based on the contents of the form. """ def __init__(self, *args, **kwargs): "Make the secret from the form initial data and slip it into the form." from paypal.standard.helpers import make_secret super(PayPalSharedSecretEncryptedPaymentsForm, self).__init__(*args, **kwargs) # @@@ Attach the secret parameter in a way that is safe for other query params. secret_param = "?secret=%s" % make_secret(self) # Initial data used in form construction overrides defaults if 'notify_url' in self.initial: self.initial['notify_url'] += secret_param else: self.fields['notify_url'].initial += secret_param class PayPalStandardBaseForm(forms.ModelForm): """Form used to receive and record PayPal IPN/PDT.""" # PayPal dates have non-standard formats. time_created = PayPalDateTimeField(required=False) payment_date = PayPalDateTimeField(required=False) next_payment_date = PayPalDateTimeField(required=False) subscr_date = PayPalDateTimeField(required=False) subscr_effective = PayPalDateTimeField(required=False) retry_at = PayPalDateTimeField(required=False) case_creation_date = PayPalDateTimeField(required=False) auction_closing_date = PayPalDateTimeField(required=False)
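# A hedged sketch of wiring the button into a view; the URLs, template name, and
# item details are placeholders. PayPalPaymentsForm.render() returns the
# ready-to-embed <form> markup pointing at the live or sandbox endpoint.
from django.shortcuts import render as render_template


def checkout(request):
    paypal_dict = {
        "amount": 10,
        "item_name": "Widget 001",
        "invoice": "unique-invoice-0001",
        "notify_url": "https://example.com/paypal/ipn/",
        "return_url": "https://example.com/paypal/thanks/",
        "cancel_return": "https://example.com/paypal/cancelled/",
    }
    form = PayPalPaymentsForm(initial=paypal_dict)
    # in the template: {{ paypal_form.render }}
    return render_template(request, "checkout.html", {"paypal_form": form})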
mit
-8,756,174,441,328,358,000
39.912879
103
0.634386
false
CaliopeProject/CaliopeServer
src/cid/forms/siim2/Company/models.py
1
1821
# -*- encoding: utf-8 -*-
"""
@authors: Nelson Daniel Ochoa [email protected]
          Sebastián Ortiz V. [email protected]

@license:  GNU AFFERO GENERAL PUBLIC LICENSE

SIIM Models are the data definition of SIIM2 Framework

Copyright (C) 2013 Infometrika Ltda.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#Caliope Entities
from cid.core.forms import FormNode
from cid.core.entities import (VersionedNode,
                               ZeroOrMore,
                               RelationshipTo,
                               StringProperty)


class Company(FormNode):
    #: Document or identification number
    number_identification = StringProperty()

    #: Check digit of the identification number
    digit_verification = StringProperty()

    #: Name or registered business name
    name = StringProperty()

    #: Acronym
    initial = StringProperty()

    #: Legal representative
    legal_representative = RelationshipTo(VersionedNode, 'IS_IN', cardinality=ZeroOrMore)

    #: Telephone
    telephone = StringProperty()

    #: Address
    address = RelationshipTo(VersionedNode, 'IS_IN', cardinality=ZeroOrMore)

    #: Email address
    email = StringProperty()
agpl-3.0
-4,010,117,498,692,730,000
36.040816
89
0.698456
false
jdotjdot/django-apptemplates
apptemplates/__init__.py
1
2709
""" Django template loader that allows you to load a template from a specific Django application. """ from os.path import dirname, join, abspath from django.conf import settings from django.template.base import Origin from django.template.loaders.filesystem import Loader as FilesystemLoader try: from importlib import import_module # noqa pylint: disable=wrong-import-order,no-name-in-module except ImportError: # Python < 2.7 from django.utils.importlib import import_module # noqa pylint: disable=no-name-in-module,import-error import django _cache = {} def get_app_template_dir(app_name): """ Get the template directory for an application We do not use django.db.models.get_app, because this will fail if an app does not have any models. Returns a full path, or None if the app was not found. """ if app_name in _cache: return _cache[app_name] template_dir = None for app in settings.INSTALLED_APPS: if app.split('.')[-1] == app_name: # Do not hide import errors; these should never happen at this # point anyway mod = import_module(app) template_dir = join(abspath(dirname(mod.__file__)), 'templates') break _cache[app_name] = template_dir return template_dir if django.VERSION[:2] >= (1, 9): def get_template_path(template_dir, template_name, loader=None): """Return Origin object with template file path""" return Origin(name=join(template_dir, template_name), template_name=template_name, loader=loader) else: def get_template_path(template_dir, template_name, loader=None): """Return template file path (for Django < 1.9)""" _ = loader # noqa return join(template_dir, template_name) class Loader(FilesystemLoader): """ FilesystemLoader for templates of a Django app """ is_usable = True def get_template_sources(self, template_name, template_dirs=None): """ Return the absolute paths to "template_name" in the specified app If the name does not contain an app name (no colon), an empty list is returned. The parent FilesystemLoader.load_template_source() will take care of the actual loading for us. """ if ':' not in template_name: return [] app_name, template_name = template_name.split(":", 1) template_dir = get_app_template_dir(app_name) if template_dir: return [get_template_path(template_dir, template_name, self)] else: return []
mit
-5,958,066,998,723,027,000
34.12
107
0.627907
false
browseinfo/odoo_saas3_nicolas
addons/website_quote/controllers/main.py
1
8818
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID from openerp.addons.web import http from openerp.addons.web.http import request from openerp.addons.website.models import website import werkzeug import datetime import time from openerp.tools.translate import _ class sale_quote(http.Controller): @http.route([ "/quote/<int:order_id>", "/quote/<int:order_id>/<token>" ], type='http', auth="public", website=True) def view(self, order_id, token=None, message=False, **post): # use SUPERUSER_ID allow to access/view order for public user # only if he knows the private token order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id) now = time.strftime('%Y-%m-%d') if token: if token != order.access_token: return request.website.render('website.404') # Log only once a day if request.httprequest.session.get('view_quote',False)!=now: request.httprequest.session['view_quote'] = now body=_('Quotation viewed by customer') self.__message_post(body, order_id, type='comment') days = 0 if order.validity_date: days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1 values = { 'quotation': order, 'message': message and int(message) or False, 'option': bool(filter(lambda x: not x.line_id, order.options)), 'order_valid': (not order.validity_date) or (now <= order.validity_date), 'days_valid': max(days, 0) } return request.website.render('website_quote.so_quotation', values) @http.route(['/quote/accept'], type='json', auth="public", website=True) def accept(self, order_id=None, token=None, signer=None, sign=None, **post): order_obj = request.registry.get('sale.order') order = order_obj.browse(request.cr, SUPERUSER_ID, order_id) if token != order.access_token: return request.website.render('website.404') attachments=sign and [('signature.png', sign.decode('base64'))] or [] order_obj.signal_order_confirm(request.cr, SUPERUSER_ID, [order_id], context=request.context) message = _('Order signed by %s') % (signer,) self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments) return True @http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True) def decline(self, order_id, token, **post): order_obj = request.registry.get('sale.order') order = order_obj.browse(request.cr, SUPERUSER_ID, order_id) if token != order.access_token: return request.website.render('website.404') request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id]) message = post.get('decline_message') if message: 
self.__message_post(message, order_id, type='comment', subtype='mt_comment') return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token)) @http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True) def post(self, order_id, token, **post): # use SUPERUSER_ID allow to access/view order for public user order_obj = request.registry.get('sale.order') order = order_obj.browse(request.cr, SUPERUSER_ID, order_id) message = post.get('comment') if token != order.access_token: return request.website.render('website.404') if message: self.__message_post(message, order_id, type='comment', subtype='mt_comment') return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token)) def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]): request.session.body = message cr, uid, context = request.cr, request.uid, request.context user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context) if 'body' in request.session and request.session.body: request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id, body=request.session.body, type=type, subtype=subtype, author_id=user.partner_id.id, context=context, attachments=attachments ) request.session.body = False return True @http.route(['/quote/update_line'], type='json', auth="public", website=True) def update(self, line_id=None, remove=False, unlink=False, order_id=None, token=None, **post): order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id)) if token != order.access_token: return request.website.render('website.404') if order.state not in ('draft','sent'): return False line_id=int(line_id) if unlink: request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context) return False number=(remove and -1 or 1) order_line_obj = request.registry.get('sale.order.line') order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0] quantity = order_line_val['product_uom_qty'] + number order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context) return [str(quantity), str(order.amount_total)] @http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True, multilang=True) def template_view(self, quote, **post): values = { 'template': quote } return request.website.render('website_quote.so_template', values) @http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True) def add(self, option_id, order_id, token, **post): vals = {} order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id) if token != order.access_token: return request.website.render('website.404') option_obj = request.registry.get('sale.order.option') option = option_obj.browse(request.cr, SUPERUSER_ID, option_id) res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id, False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id, option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'), False, order.fiscal_position.id, True, request.context) vals = res.get('value', {}) if 'tax_id' in vals: vals['tax_id'] = [(6, 0, vals['tax_id'])] vals.update({ 'price_unit': option.price_unit, 'website_description': option.website_description, 'name': option.name, 'order_id': order.id, 'product_id' : 
option.product_id.id, 'product_uos_qty': option.quantity, 'product_uos': option.uom_id.id, 'product_uom_qty': option.quantity, 'product_uom': option.uom_id.id, 'discount': option.discount, }) line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context) option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context) return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
agpl-3.0
2,026,008,436,371,959,300
50.567251
129
0.61658
false
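The quote controller in the record above folds the order's validity window into two values, order_valid and days_valid. Below is a minimal standalone sketch of that same date logic, assuming validity_date is either None or an ISO "YYYY-MM-DD" string (the format the record itself parses), so the behaviour is easy to check outside of OpenERP; the function name is hypothetical.

# Minimal sketch of the quote-validity computation used above, plain Python only.
import datetime
import time

def quote_validity(validity_date):
    # Mirrors the record: ISO date strings compare correctly as plain strings.
    now = time.strftime('%Y-%m-%d')
    days = 0
    if validity_date:
        days = (datetime.datetime.strptime(validity_date, '%Y-%m-%d')
                - datetime.datetime.now()).days + 1
    order_valid = (not validity_date) or (now <= validity_date)
    return order_valid, max(days, 0)

if __name__ == "__main__":
    print(quote_validity(None))          # (True, 0)  -- no expiry date set
    print(quote_validity('2999-01-01'))  # (True, <large positive day count>)
    print(quote_validity('2000-01-01'))  # (False, 0) -- already expired
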
dgoodwin/rounder
src/rounder/ui/gtk/main.py
1
11733
# encoding=utf-8 # # Rounder - Poker for the GNOME Desktop # # Copyright (C) 2008 Devan Goodwin <[email protected]> # Copyright (C) 2008 James Bowes <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA """ The Rounder GTK Client """ import subprocess import pygtk pygtk.require('2.0') import gtk import gtk.glade from logging import getLogger logger = getLogger("rounder.ui.gtk.main") from twisted.internet import gtk2reactor gtk2reactor.install() from rounder.network.client import RounderNetworkClient from rounder.network.serialize import register_message_classes from rounder.ui.client import Client from rounder.ui.gtk.util import find_file_on_path from rounder.ui.gtk.table import TableWindow ROUNDER_LOGO_FILE = "rounder/ui/gtk/data/rounder-logo.png" ROUNDER_ICON_FILE = "rounder/ui/gtk/data/rounder-icon.svg" def connect(host, port, username, password, app): # Attempt to connect to the specified server by creating a client # object. If successful pass the client back to the main application, # otherwise display an error status message and let the user try # again: client = RounderNetworkClient(app) try: client.connect(host, port, username, password) except Exception, e: logger.error("Unable to login to %s as %s" % (host, username)) class RounderGtk(Client): """ The Rounder GTK Client Represents the main Rounder interface to connect to a server, view available tables, and join them. (opening a separate window) """ def __init__(self, host=None, port=None, username=None, password=None): logger.info("Starting rounder.") logger.debug("Initial connection Info:\n" " host = %s\n" " port = %s\n" " username = %s\n" " password = %s", host, port, username, password) register_message_classes() glade_file = 'rounder/ui/gtk/data/rounder.glade' self.glade_xml = gtk.glade.XML(find_file_on_path(glade_file)) main_window = self.glade_xml.get_widget('main-window') main_window.set_icon_from_file(find_file_on_path(ROUNDER_ICON_FILE)) self.table_list = self.glade_xml.get_widget('table-list') self.statusbar = self.glade_xml.get_widget('statusbar') self.connect_button = self.glade_xml.get_widget('connect-button') logo = self.glade_xml.get_widget("rounder-logo-image") logo.set_from_file(find_file_on_path(ROUNDER_LOGO_FILE)) signals = { 'on_connect_activate': self.show_connect_dialog, 'on_close_activate': self.shutdown, 'on_main_window_destroy': self.shutdown, 'on_connect_button_clicked': self.show_connect_dialog, 'on_quit_button_clicked': self.shutdown, 'on_table_list_row_activated': self.open_table, 'on_about1_activate': self.open_about_window, } self.glade_xml.signal_autoconnect(signals) treeselection = self.table_list.get_selection() treeselection.set_mode(gtk.SELECTION_SINGLE) # Reference to a network client. 
self.client = None self.connect_dialog = None # Set once connect dialog is open self.set_status("Connect to a server to begin playing.") main_window.show_all() # Autoconnect if given details, otherwise show connect dialog: if host != None and port != None and username != None and \ password != None: connect(host, port, username, password, self) else: self.show_connect_dialog(None) def main(self): """ Launch the GTK main loop. """ gtk.main() def shutdown(self, widget): """ Closes the application. """ if self.client != None: self.client.shutdown() logger.info("Stopping application.") gtk.main_quit() def open_table(self, treeview, row, column): """ Open a table window. Connected to the table list and called when the user selected a table to join. """ logger.info("Opening table window") model = treeview.get_model() logger.debug("row clicked: %s\n" "table id: %s\n" "table name: %s", row[0], model[row][0], model[row][1]) self.client.open_table(model[row][0]) def open_table_success(self, client_table): table_win = TableWindow(self, client_table) def show_connect_dialog(self, widget): """ Opens the connect to server dialog. """ if self.connect_dialog == None: self.connect_dialog = ConnectDialog(self) else: logger.debug("Connect dialog already open.") def connect_success(self, client): """ Callback used by the connect dialog after a connection to a server has been successfully made. """ logger.info("Connected to %s:%s as %s" % (client.host, client.port, client.username)) self.client = client # Call also sets our reference to None: if self.connect_dialog != None: self.connect_dialog.destroy(None, None, None) self.connect_button.set_sensitive(False) self.set_status("Connected to server: %s" % client.host) server_label = self.glade_xml.get_widget('server-label') server_label.set_text(client.host) username_label = self.glade_xml.get_widget('username-label') username_label.set_text(client.username) self.client.get_table_list() def connect_failure(self): """ Connection failed callback. """ logger.warn("Connect failed") self.connect_dialog.set_status("Login failed.") def list_tables_success(self, table_listings): """ Populate the list of tables in the main server window. GTK TreeView's aren't fun but this works in conjunction with the __cell_* methods to populate the columns. 
""" logger.debug("Populating table list") column_names = ["Table ID", "Name", "Limit", "Players"] cell_data_funcs = [self.__cell_table_id, self.__cell_table, self.__cell_limit, self.__cell_players] tables = gtk.ListStore(int, str, str, str) for table in table_listings: tables.append([table.id, table.name, table.limit, table.player_count]) columns = [None] * len(column_names) # Populate the table columns and cells: for n in range(0, len(column_names)): cell = gtk.CellRendererText() columns[n] = gtk.TreeViewColumn(column_names[n], cell) columns[n].set_cell_data_func(cell, cell_data_funcs[n]) self.table_list.append_column(columns[n]) self.table_list.set_model(tables) @staticmethod def _open_url(dialog, url, data): subprocess.call(['xdg-open', url]) @staticmethod def _open_email(dialog, email, data): subprocess.call(['xdg-email', email]) def open_about_window(self, menuitem): gtk.about_dialog_set_url_hook(self._open_url, None) gtk.about_dialog_set_email_hook(self._open_email, None) about = gtk.AboutDialog() about.set_name("Rounder") about.set_version("0.0.1") about.set_copyright("Copyright © 2008 Devan Goodwin & James Bowes") about.set_comments("Poker for the GNOME Desktop") # XXX Put the full license in here about.set_license("GPLv2") about.set_website("http://dangerouslyinc.com") about.set_website_label("http://dangerouslyinc.com") about.set_authors(('Devan Goodwin <[email protected]>', 'James Bowes <[email protected]>', 'Kenny MacDermid <[email protected]>')) about.set_artists(('Anette Goodwin <[email protected]>', 'James Bowes <[email protected]>')) about.set_logo(gtk.gdk.pixbuf_new_from_file( find_file_on_path(ROUNDER_LOGO_FILE))) about.set_icon_from_file(find_file_on_path(ROUNDER_ICON_FILE)) about.connect('response', lambda x, y: about.destroy()) about.show_all() def __cell_table_id(self, column, cell, model, iter): cell.set_property('text', model.get_value(iter, 0)) def __cell_table(self, column, cell, model, iter): cell.set_property('text', model.get_value(iter, 1)) def __cell_limit(self, column, cell, model, iter): cell.set_property('text', model.get_value(iter, 2)) def __cell_players(self, column, cell, model, iter): cell.set_property('text', model.get_value(iter, 3)) def set_status(self, message): """ Display a message in the main window's status bar. """ self.statusbar.push(self.statusbar.get_context_id("Rounder"), message) self.statusbar.show() class ConnectDialog(object): """ Dialog for connecting to a server. """ def __init__(self, app): logger.debug("Opening connect dialog.") self.app = app glade_file = 'rounder/ui/gtk/data/connect.glade' self.glade_xml = gtk.glade.XML(find_file_on_path(glade_file)) self.connect_dialog = self.glade_xml.get_widget('connect-dialog') self.connect_dialog.set_icon_from_file( find_file_on_path(ROUNDER_ICON_FILE)) signals = { 'on_connect_button_clicked': self.connect, } self.glade_xml.signal_autoconnect(signals) self.connect_dialog.connect("delete_event", self.destroy) self.connect_dialog.show_all() def connect(self, widget): """ Attempt to open a connection to the host and port specified. 
""" host_entry = self.glade_xml.get_widget('host-entry') host = host_entry.get_text() port_spinbutton = self.glade_xml.get_widget('port-spinbutton') port = port_spinbutton.get_value_as_int() username_entry = self.glade_xml.get_widget('username-entry') username = username_entry.get_text() password_entry = self.glade_xml.get_widget('password-entry') password = password_entry.get_text() logger.debug("Connecting to %s on port %s" "\n as: %s / %s", host, port, username, password) connect(host, port, username, password, self.app) def set_status(self, message): """ Display a message in the connect dialog's status bar. """ statusbar = self.glade_xml.get_widget('statusbar') statusbar.push(statusbar.get_context_id("Connect Dialog"), message) statusbar.show() def destroy(self, widget, event, data=None): """ Called by main Rounder application who receives the success callback from the network client. """ logger.debug("Closing connect dialog.") self.app.connect_dialog = None self.connect_dialog.destroy()
gpl-2.0
-6,898,350,797,561,719,000
36.244444
78
0.629304
false
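The GTK client above never talks to the network directly; it hands itself to RounderNetworkClient and waits for connect_success / connect_failure callbacks. A minimal sketch of that callback contract, with a fake client standing in for the real network code so it runs without Twisted or GTK; the class names, port number and password check here are hypothetical.

# Minimal sketch of the UI <-> client callback pattern used by RounderGtk.
class FakeClient(object):
    def __init__(self, app):
        self.app = app
        self.host = None
        self.port = None
        self.username = None

    def connect(self, host, port, username, password):
        # A real client would perform the network login here, then report
        # the outcome back to the UI object it was given.
        if password == "secret":
            self.host, self.port, self.username = host, port, username
            self.app.connect_success(self)
        else:
            self.app.connect_failure()

class RecordingUi(object):
    def connect_success(self, client):
        print("connected to %s:%s as %s" % (client.host, client.port,
                                            client.username))

    def connect_failure(self):
        print("login failed")

if __name__ == "__main__":
    ui = RecordingUi()
    FakeClient(ui).connect("example.org", 35100, "alice", "secret")
    FakeClient(ui).connect("example.org", 35100, "alice", "wrong")
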
terzeron/FeedMakerApplications
test/test.py
1
2410
#!/usr/bin/env python import sys import os import filecmp from feed_maker_util import exec_cmd def test_script(feed, script, work_dir, test_dir, index): os.chdir(work_dir) cmd = "cat %s/input.%d.txt | %s > %s/result.%d.temp" % (test_dir, index, script, test_dir, index) #print(cmd) (result, error) = exec_cmd(cmd) if not error: os.chdir(test_dir) return filecmp.cmp("result.%d.temp" % (index), "expected.output.%d.txt" % (index)), "", cmd print(error) return False, error, cmd def main(): fm_cwd = os.getenv("FEED_MAKER_WORK_DIR") test_subjects = { "naver/navercast": [ "../capture_item_navercast.py" ], "naver/naverblog.pjwwoo": [ "../capture_item_naverblog.py" ], "naver/dice": [ "../capture_item_naverwebtoon.py" ], "naver/naverwebtoon": [ "./capture_item_link_title.py" ], "naver/naverpost.businessinsight": [ "../capture_item_naverpost.py", "../post_process_naverpost.py" ], "kakao/monk_xuanzang": [ "../capture_item_kakaowebtoon.py" ], "kakao/kakaowebtoon": [ "./capture_item_link_title.py" ], "daum/matchless_abi": [ "../capture_item_daumwebtoon.py", "../post_process_daumwebtoon.py 'http://cartoon.media.daum.net/m/webtoon/viewer/45820'" ], "daum/daumwebtoon": [ "./capture_item_link_title.py" ], "tistory/nasica1": [ "../capture_item_tistory.py" ], "egloos/oblivion": [ "../capture_item_link_title.py" ], "study/javabeat": [ "./capture_item_link_title.py" ], "manatoki/level_up_alone" : [ "../capture_item_manatoki.py" ], "jmana/one_punch_man_remake" : [ "../capture_item_jmana.py" ], "wfwf/warrior_at_fff_level" : [ "../capture_item_wfwf.py" ], "wtwt/login_alone" : [ "../capture_item_wtwt.py" ], "marumaru/ride_on_king" : [ "../capture_item_marumaru.py" ], } for (feed, scripts) in test_subjects.items(): index = 0 for script in scripts: index += 1 print(feed) work_dir = fm_cwd + "/" + feed test_dir = fm_cwd + "/test/" + feed result, error, cmd = test_script(feed, script, work_dir, test_dir, index) if error: print("Error in %s of %s\n%s\n%s" % (feed, script, cmd, error)) return -1 print("Ok") if __name__ == "__main__": sys.exit(main())
gpl-2.0
6,115,003,064,114,718,000
39.166667
156
0.56473
false
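The test.py record above pipes a recorded input file through each capture script and compares the output against a checked-in expected file. Its exec_cmd helper lives in feed_maker_util and is not shown, so here is a standard-library-only sketch of the same golden-file pattern (Python 3, subprocess + filecmp); the script and file paths in the example are hypothetical.

# Minimal sketch of golden-file testing with the standard library only.
import filecmp
import subprocess

def run_capture_script(script, input_path, output_path):
    # Feed the recorded input into the capture script and save its output.
    with open(input_path, "rb") as infile, open(output_path, "wb") as outfile:
        completed = subprocess.run(script, stdin=infile, stdout=outfile,
                                   stderr=subprocess.PIPE, shell=True)
    return completed.returncode, completed.stderr.decode("utf-8", "replace")

def check_against_golden(script, input_path, expected_path, result_path):
    code, err = run_capture_script(script, input_path, result_path)
    if code != 0:
        return False, err
    return filecmp.cmp(result_path, expected_path, shallow=False), err

if __name__ == "__main__":
    ok, err = check_against_golden("../capture_item_navercast.py",
                                   "input.1.txt", "expected.output.1.txt",
                                   "result.1.temp")
    print("Ok" if ok else "Failed: %s" % err)
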
rflamary/POT
ot/stochastic.py
1
24589
""" Stochastic solvers for regularized OT. """ # Author: Kilian Fatras <[email protected]> # # License: MIT License import numpy as np ############################################################################## # Optimization toolbox for SEMI - DUAL problems ############################################################################## def coordinate_grad_semi_dual(b, M, reg, beta, i): r''' Compute the coordinate gradient update for regularized discrete distributions for (i, :) The function computes the gradient of the semi dual problem: .. math:: \max_v \sum_i (\sum_j v_j * b_j - reg * log(\sum_j exp((v_j - M_{i,j})/reg) * b_j)) * a_i Where : - M is the (ns,nt) metric cost matrix - v is a dual variable in R^J - reg is the regularization term - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the ASGD & SAG algorithms as proposed in [18]_ [alg.1 & alg.2] Parameters ---------- b : ndarray, shape (nt,) Target measure. M : ndarray, shape (ns, nt) Cost matrix. reg : float Regularization term > 0. v : ndarray, shape (nt,) Dual variable. i : int Picked number i. Returns ------- coordinate gradient : ndarray, shape (nt,) Examples -------- >>> import ot >>> np.random.seed(0) >>> n_source = 7 >>> n_target = 4 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> X_source = np.random.randn(n_source, 2) >>> Y_target = np.random.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000) array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06], [1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03], [3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07], [2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04], [9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01], [2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01], [4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]]) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. ''' r = M[i, :] - beta exp_beta = np.exp(-r / reg) * b khi = exp_beta / (np.sum(exp_beta)) return b - khi def sag_entropic_transport(a, b, M, reg, numItermax=10000, lr=None): r''' Compute the SAG algorithm to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1 = b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the SAG algorithm as proposed in [18]_ [alg.1] Parameters ---------- a : ndarray, shape (ns,), Source measure. b : ndarray, shape (nt,), Target measure. M : ndarray, shape (ns, nt), Cost matrix. reg : float Regularization term > 0 numItermax : int Number of iteration. lr : float Learning rate. Returns ------- v : ndarray, shape (nt,) Dual variable. 
Examples -------- >>> import ot >>> np.random.seed(0) >>> n_source = 7 >>> n_target = 4 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> X_source = np.random.randn(n_source, 2) >>> Y_target = np.random.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000) array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06], [1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03], [3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07], [2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04], [9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01], [2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01], [4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]]) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. ''' if lr is None: lr = 1. / max(a / reg) n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_beta = np.zeros(n_target) stored_gradient = np.zeros((n_source, n_target)) sum_stored_gradient = np.zeros(n_target) for _ in range(numItermax): i = np.random.randint(n_source) cur_coord_grad = a[i] * coordinate_grad_semi_dual(b, M, reg, cur_beta, i) sum_stored_gradient += (cur_coord_grad - stored_gradient[i]) stored_gradient[i] = cur_coord_grad cur_beta += lr * (1. / n_source) * sum_stored_gradient return cur_beta def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None): r''' Compute the ASGD algorithm to solve the regularized semi continous measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the ASGD algorithm as proposed in [18]_ [alg.2] Parameters ---------- b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 numItermax : int Number of iteration. lr : float Learning rate. Returns ------- ave_v : ndarray, shape (nt,) dual variable Examples -------- >>> import ot >>> np.random.seed(0) >>> n_source = 7 >>> n_target = 4 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> X_source = np.random.randn(n_source, 2) >>> Y_target = np.random.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000) array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06], [1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03], [3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07], [2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04], [9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01], [2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01], [4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]]) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. ''' if lr is None: lr = 1. 
/ max(a / reg) n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_beta = np.zeros(n_target) ave_beta = np.zeros(n_target) for cur_iter in range(numItermax): k = cur_iter + 1 i = np.random.randint(n_source) cur_coord_grad = coordinate_grad_semi_dual(b, M, reg, cur_beta, i) cur_beta += (lr / np.sqrt(k)) * cur_coord_grad ave_beta = (1. / k) * cur_beta + (1 - 1. / k) * ave_beta return ave_beta def c_transform_entropic(b, M, reg, beta): r''' The goal is to recover u from the c-transform. The function computes the c_transform of a dual variable from the other dual variable: .. math:: u = v^{c,reg} = -reg \sum_j exp((v - M)/reg) b_j Where : - M is the (ns,nt) metric cost matrix - u, v are dual variables in R^IxR^J - reg is the regularization term It is used to recover an optimal u from optimal v solving the semi dual problem, see Proposition 2.1 of [18]_ Parameters ---------- b : ndarray, shape (nt,) Target measure M : ndarray, shape (ns, nt) Cost matrix reg : float Regularization term > 0 v : ndarray, shape (nt,) Dual variable. Returns ------- u : ndarray, shape (ns,) Dual variable. Examples -------- >>> import ot >>> np.random.seed(0) >>> n_source = 7 >>> n_target = 4 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> X_source = np.random.randn(n_source, 2) >>> Y_target = np.random.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000) array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06], [1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03], [3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07], [2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04], [9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01], [2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01], [4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]]) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. ''' n_source = np.shape(M)[0] alpha = np.zeros(n_source) for i in range(n_source): r = M[i, :] - beta min_r = np.min(r) exp_beta = np.exp(-(r - min_r) / reg) * b alpha[i] = min_r - reg * np.log(np.sum(exp_beta)) return alpha def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None, log=False): r''' Compute the transportation matrix to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. 
\gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the SAG or ASGD algorithms as proposed in [18]_ Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 methode : str used method (SAG or ASGD) numItermax : int number of iteration lr : float learning rate n_source : int size of the source measure n_target : int size of the target measure log : bool, optional record log if True Returns ------- pi : ndarray, shape (ns, nt) transportation matrix log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> np.random.seed(0) >>> n_source = 7 >>> n_target = 4 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> X_source = np.random.randn(n_source, 2) >>> Y_target = np.random.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000) array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06], [1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03], [3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07], [2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04], [9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01], [2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01], [4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]]) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. ''' if method.lower() == "sag": opt_beta = sag_entropic_transport(a, b, M, reg, numItermax, lr) elif method.lower() == "asgd": opt_beta = averaged_sgd_entropic_transport(a, b, M, reg, numItermax, lr) else: print("Please, select your method between SAG and ASGD") return None opt_alpha = c_transform_entropic(b, M, reg, opt_beta) pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) * a[:, None] * b[None, :]) if log: log = {} log['alpha'] = opt_alpha log['beta'] = opt_beta return pi, log else: return pi ############################################################################## # Optimization toolbox for DUAL problems ############################################################################## def batch_grad_dual(a, b, M, reg, alpha, beta, batch_size, batch_alpha, batch_beta): r''' Computes the partial gradient of the dual optimal transport problem. For each (i,j) in a batch of coordinates, the partial gradients are : .. 
math:: \partial_{u_i} F = u_i * b_s/l_{v} - \sum_{j \in B_v} exp((u_i + v_j - M_{i,j})/reg) * a_i * b_j \partial_{v_j} F = v_j * b_s/l_{u} - \sum_{i \in B_u} exp((u_i + v_j - M_{i,j})/reg) * a_i * b_j Where : - M is the (ns,nt) metric cost matrix - u, v are dual variables in R^ixR^J - reg is the regularization term - :math:`B_u` and :math:`B_v` are lists of index - :math:`b_s` is the size of the batchs :math:`B_u` and :math:`B_v` - :math:`l_u` and :math:`l_v` are the lenghts of :math:`B_u` and :math:`B_v` - a and b are source and target weights (sum to 1) The algorithm used for solving the dual problem is the SGD algorithm as proposed in [19]_ [alg.1] Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 alpha : ndarray, shape (ns,) dual variable beta : ndarray, shape (nt,) dual variable batch_size : int size of the batch batch_alpha : ndarray, shape (bs,) batch of index of alpha batch_beta : ndarray, shape (bs,) batch of index of beta Returns ------- grad : ndarray, shape (ns,) partial grad F Examples -------- >>> import ot >>> np.random.seed(0) >>> n_source = 7 >>> n_target = 4 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> X_source = np.random.randn(n_source, 2) >>> Y_target = np.random.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> sgd_dual_pi, log = ot.stochastic.solve_dual_entropic(a, b, M, reg=1, batch_size=3, numItermax=30000, lr=0.1, log=True) >>> log['alpha'] array([0.71759102, 1.57057384, 0.85576566, 0.1208211 , 0.59190466, 1.197148 , 0.17805133]) >>> log['beta'] array([0.49741367, 0.57478564, 1.40075528, 2.75890102]) >>> sgd_dual_pi array([[2.09730063e-02, 8.38169324e-02, 7.50365455e-03, 8.72731415e-09], [5.58432437e-03, 5.89881299e-04, 3.09558411e-05, 8.35469849e-07], [3.26489515e-03, 7.15536035e-02, 2.99778211e-02, 3.02601593e-10], [4.05390622e-02, 5.31085068e-02, 6.65191787e-02, 1.55812785e-06], [7.82299812e-02, 6.12099102e-03, 4.44989098e-02, 2.37719187e-03], [5.06266486e-02, 2.16230494e-03, 2.26215141e-03, 6.81514609e-04], [6.06713990e-02, 3.98139808e-02, 5.46829338e-02, 8.62371424e-06]]) References ---------- [Seguy et al., 2018] : International Conference on Learning Representation (2018), arXiv preprint arxiv:1711.02283. ''' G = - (np.exp((alpha[batch_alpha, None] + beta[None, batch_beta] - M[batch_alpha, :][:, batch_beta]) / reg) * a[batch_alpha, None] * b[None, batch_beta]) grad_beta = np.zeros(np.shape(M)[1]) grad_alpha = np.zeros(np.shape(M)[0]) grad_beta[batch_beta] = (b[batch_beta] * len(batch_alpha) / np.shape(M)[0] + G.sum(0)) grad_alpha[batch_alpha] = (a[batch_alpha] * len(batch_beta) / np.shape(M)[1] + G.sum(1)) return grad_alpha, grad_beta def sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr): r''' Compute the sgd algorithm to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. 
\gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 batch_size : int size of the batch numItermax : int number of iteration lr : float learning rate Returns ------- alpha : ndarray, shape (ns,) dual variable beta : ndarray, shape (nt,) dual variable Examples -------- >>> import ot >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 20000 >>> lr = 0.1 >>> batch_size = 3 >>> log = True >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> sgd_dual_pi, log = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log) >>> log['alpha'] array([0.64171798, 1.27932201, 0.78132257, 0.15638935, 0.54888354, 1.03663469, 0.20595781]) >>> log['beta'] array([0.51207194, 0.58033189, 1.28922676, 2.26859736]) >>> sgd_dual_pi array([[1.97276541e-02, 7.81248547e-02, 6.22136048e-03, 4.95442423e-09], [4.23494310e-03, 4.43286263e-04, 2.06927079e-05, 3.82389139e-07], [3.07542414e-03, 6.67897769e-02, 2.48904999e-02, 1.72030247e-10], [4.26271990e-02, 5.53375455e-02, 6.16535024e-02, 9.88812650e-07], [7.60423265e-02, 5.89585256e-03, 3.81267087e-02, 1.39458256e-03], [4.37557504e-02, 1.85189176e-03, 1.72335760e-03, 3.55491279e-04], [6.33096109e-02, 4.11683954e-02, 5.02962051e-02, 5.43097516e-06]]) References ---------- [Seguy et al., 2018] : International Conference on Learning Representation (2018), arXiv preprint arxiv:1711.02283. ''' n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_alpha = np.zeros(n_source) cur_beta = np.zeros(n_target) for cur_iter in range(numItermax): k = np.sqrt(cur_iter + 1) batch_alpha = np.random.choice(n_source, batch_size, replace=False) batch_beta = np.random.choice(n_target, batch_size, replace=False) update_alpha, update_beta = batch_grad_dual(a, b, M, reg, cur_alpha, cur_beta, batch_size, batch_alpha, batch_beta) cur_alpha[batch_alpha] += (lr / k) * update_alpha[batch_alpha] cur_beta[batch_beta] += (lr / k) * update_beta[batch_beta] return cur_alpha, cur_beta def solve_dual_entropic(a, b, M, reg, batch_size, numItermax=10000, lr=1, log=False): r''' Compute the transportation matrix to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. 
\gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) Parameters ---------- a : ndarray, shape (ns,) source measure b : ndarray, shape (nt,) target measure M : ndarray, shape (ns, nt) cost matrix reg : float Regularization term > 0 batch_size : int size of the batch numItermax : int number of iteration lr : float learning rate log : bool, optional record log if True Returns ------- pi : ndarray, shape (ns, nt) transportation matrix log : dict log dictionary return only if log==True in parameters Examples -------- >>> import ot >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 20000 >>> lr = 0.1 >>> batch_size = 3 >>> log = True >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> sgd_dual_pi, log = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log) >>> log['alpha'] array([0.64057733, 1.2683513 , 0.75610161, 0.16024284, 0.54926534, 1.0514201 , 0.19958936]) >>> log['beta'] array([0.51372571, 0.58843489, 1.27993921, 2.24344807]) >>> sgd_dual_pi array([[1.97377795e-02, 7.86706853e-02, 6.15682001e-03, 4.82586997e-09], [4.19566963e-03, 4.42016865e-04, 2.02777272e-05, 3.68823708e-07], [3.00379244e-03, 6.56562018e-02, 2.40462171e-02, 1.63579656e-10], [4.28626062e-02, 5.60031599e-02, 6.13193826e-02, 9.67977735e-07], [7.61972739e-02, 5.94609051e-03, 3.77886693e-02, 1.36046648e-03], [4.44810042e-02, 1.89476742e-03, 1.73285847e-03, 3.51826036e-04], [6.30118293e-02, 4.12398660e-02, 4.95148998e-02, 5.26247246e-06]]) References ---------- [Seguy et al., 2018] : International Conference on Learning Representation (2018), arXiv preprint arxiv:1711.02283. ''' opt_alpha, opt_beta = sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr) pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) * a[:, None] * b[None, :]) if log: log = {} log['alpha'] = opt_alpha log['beta'] = opt_beta return pi, log else: return pi
mit
-1,453,941,329,542,792,700
31.568212
126
0.563057
false
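The stochastic.py record above exposes two top-level solvers whose docstrings already carry doctest examples; the short sketch below just strings them together on one toy problem, using the signatures shown in those docstrings. It assumes the POT package is installed so that ot.dist and ot.utils.unif are importable.

# Minimal usage sketch for solve_semi_dual_entropic and solve_dual_entropic.
import numpy as np
import ot
import ot.stochastic

rng = np.random.RandomState(0)
n_source, n_target = 7, 4
a = ot.utils.unif(n_source)          # uniform source weights
b = ot.utils.unif(n_target)          # uniform target weights
M = ot.dist(rng.randn(n_source, 2), rng.randn(n_target, 2))

# Semi-dual solver: method is either "SAG" or "ASGD".
pi_semi_dual = ot.stochastic.solve_semi_dual_entropic(
    a, b, M, reg=1, method="ASGD", numItermax=100000)

# Dual solver: stochastic gradient on both potentials, mini-batches of size 3.
pi_dual, log = ot.stochastic.solve_dual_entropic(
    a, b, M, reg=1, batch_size=3, numItermax=20000, lr=0.1, log=True)

# Both transport plans should approximately respect the marginals a and b.
print(pi_semi_dual.sum(axis=1) - a)
print(pi_dual.sum(axis=0) - b)
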
jmikkola/Lexington
src/lexington/__init__.py
1
3175
import collections from werkzeug.wrappers import Response from lexington.util import di from lexington.util import route from lexington.util import view_map from lexington.util import paths def default_dependencies(settings): dependencies = di.Dependencies() dependencies.register_value('settings', settings) dependencies.register_value('respond', Response) dependencies.register_late_bound_value('environ') paths.register_all(dependencies) return dependencies def app(): """ Helper function to construct the application factory(!) """ settings = {} dependencies = default_dependencies(settings) views = view_map.ViewMapFactory() routes = route.Routes() return ApplicationFactory(settings, dependencies, views, routes) class ApplicationFactory: def __init__(self, settings, dependencies, views, routes): self._settings = settings self._dependencies = dependencies self._views = views self._routes = routes def add_route(self, route_name, method, path_description): self._routes.add_route(route_name, method, path_description) def add_view_fn(self, route_name, fn, dependencies=None): if dependencies is None: dependencies = [] view = view_map.View(fn, route_name, dependencies) self.add_view(view) def add_view(self, view): self._views.add_view(view) def add_value(self, name, value): self._dependencies.register_value(name, value) def add_factory(self, name, factory_fn, dependencies=None): self._dependencies.register_factory(name, factory_fn, dependencies) def create_app(self): self._dependencies.check_dependencies() routing = self._routes.get_routing() view_map = self._views.create( routing.get_names(), self._dependencies.provided_dependencies() ) return Application(self._dependencies, view_map, routing) class Application: def __init__(self, dependencies, view_map, routing): self._dependencies = dependencies self._view_map = view_map self._routing = routing def __call__(self, environ, start_response): response = self._get_response(environ) return response(environ, start_response) def _get_response(self, environ): injector = self._dependencies.build_injector(late_bound_values={ 'environ': environ, }) method = injector.get_dependency('method') path = injector.get_dependency('path') route_name, segment_matches = self._routing.path_to_route(path, method) if route_name is None: return self._404('Route not found') view = self._view_map.get_view(route_name) if view is None: return self._404('No view found for route ' + route_name) result = injector.inject(view.fn, view.dependencies) if isinstance(result, Response): return result else: # Assume that the result is text return Response(result, mimetype='text/plain') def _404(self, message): return Response(message, status=404)
mit
-8,198,006,270,251,723,000
32.421053
79
0.656693
false
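The Lexington record above defines the factory API (app, add_route, add_view_fn, create_app) but shows no caller. A minimal sketch of wiring it into a running WSGI app follows; it assumes the path syntax accepted by add_route is a plain string such as "/hello" (the real rules live in lexington.util.route, which is not shown) and uses werkzeug's dev server, werkzeug already being a dependency of the module.

# Minimal sketch: build and serve a one-route Lexington application.
from werkzeug.serving import run_simple

import lexington

factory = lexington.app()
factory.add_route("hello", "GET", "/hello")

def hello_view():
    # Returning plain text; Application wraps it in a text/plain Response.
    return "Hello from Lexington"

factory.add_view_fn("hello", hello_view)

application = factory.create_app()

if __name__ == "__main__":
    run_simple("localhost", 5000, application)
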
111pontes/ydk-py
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_infra_locale_cfg.py
1
10790
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'LocaleLanguageEnum' : _MetaInfoEnum('LocaleLanguageEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg', { 'aa':'aa', 'ab':'ab', 'af':'af', 'am':'am', 'ar':'ar', 'as':'as_', 'ay':'ay', 'az':'az', 'ba':'ba', 'be':'be', 'bg':'bg', 'bh':'bh', 'bi':'bi', 'bn':'bn', 'bo':'bo', 'br':'br', 'ca':'ca', 'co':'co', 'cs':'cs', 'cy':'cy', 'da':'da', 'de':'de', 'dz':'dz', 'el':'el', 'en':'en', 'eo':'eo', 'es':'es', 'et':'et', 'eu':'eu', 'fa':'fa', 'fi':'fi', 'fj':'fj', 'fo':'fo', 'fr':'fr', 'fy':'fy', 'ga':'ga', 'gd':'gd', 'gl':'gl', 'gn':'gn', 'gu':'gu', 'ha':'ha', 'he':'he', 'hi':'hi', 'hr':'hr', 'hu':'hu', 'hy':'hy', 'ia':'ia', 'id':'id', 'ie':'ie', 'ik':'ik', 'is':'is_', 'it':'it', 'iu':'iu', 'ja':'ja', 'jw':'jw', 'ka':'ka', 'kk':'kk', 'kl':'kl', 'km':'km', 'kn':'kn', 'ko':'ko', 'ks':'ks', 'ku':'ku', 'ky':'ky', 'la':'la', 'ln':'ln', 'lo':'lo', 'lt':'lt', 'lv':'lv', 'mg':'mg', 'mi':'mi', 'mk':'mk', 'ml':'ml', 'mn':'mn', 'mo':'mo', 'mr':'mr', 'ms':'ms', 'mt':'mt', 'my':'my', 'na':'na', 'ne':'ne', 'nl':'nl', 'no':'no', 'oc':'oc', 'om':'om', 'or':'or_', 'pa':'pa', 'pl':'pl', 'ps':'ps', 'pt':'pt', 'qu':'qu', 'rm':'rm', 'rn':'rn', 'ro':'ro', 'ru':'ru', 'rw':'rw', 'sa':'sa', 'sd':'sd', 'sg':'sg', 'sh':'sh', 'si':'si', 'sk':'sk', 'sl':'sl', 'sm':'sm', 'sn':'sn', 'so':'so', 'sq':'sq', 'sr':'sr', 'ss':'ss', 'st':'st', 'su':'su', 'sv':'sv', 'sw':'sw', 'ta':'ta', 'te':'te', 'tg':'tg', 'th':'th', 'ti':'ti', 'tk':'tk', 'tl':'tl', 'tn':'tn', 'to':'to', 'tr':'tr', 'ts':'ts', 'tt':'tt', 'tw':'tw', 'ug':'ug', 'uk':'uk', 'ur':'ur', 'uz':'uz', 'vi':'vi', 'vo':'vo', 'wo':'wo', 'xh':'xh', 'yi':'yi', 'yo':'yo', 'za':'za', 'zh':'zh', 'zu':'zu', }, 'Cisco-IOS-XR-infra-infra-locale-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-infra-locale-cfg']), 'LocaleCountryEnum' : _MetaInfoEnum('LocaleCountryEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg', { 'ad':'ad', 'ae':'ae', 'af':'af', 'ag':'ag', 'ai':'ai', 'al':'al', 'am':'am', 'an':'an', 'ao':'ao', 'aq':'aq', 'ar':'ar', 'as':'as_', 'at':'at', 'au':'au', 'aw':'aw', 'az':'az', 'ba':'ba', 'bb':'bb', 'bd':'bd', 'be':'be', 'bf':'bf', 'bg':'bg', 'bh':'bh', 'bi':'bi', 'bj':'bj', 'bm':'bm', 'bn':'bn', 'bo':'bo', 'br':'br', 'bs':'bs', 'bt':'bt', 'bv':'bv', 'bw':'bw', 'by':'by', 'bz':'bz', 'ca':'ca', 'cc':'cc', 'cd':'cd', 'cf':'cf', 'cg':'cg', 'ch':'ch', 'ci':'ci', 'ck':'ck', 'cl':'cl', 'cm':'cm', 'cn':'cn', 'co':'co', 'cr':'cr', 'cu':'cu', 'cv':'cv', 'cx':'cx', 'cy':'cy', 'cz':'cz', 'de':'de', 'dj':'dj', 'dk':'dk', 'dm':'dm', 'do':'do', 'dz':'dz', 'ec':'ec', 'ee':'ee', 'eg':'eg', 'eh':'eh', 'er':'er', 'es':'es', 'et':'et', 'fi':'fi', 'fj':'fj', 'fk':'fk', 'fm':'fm', 'fo':'fo', 'fr':'fr', 'ga':'ga', 'gb':'gb', 'gd':'gd', 'ge':'ge', 'gf':'gf', 'gh':'gh', 'gi':'gi', 'gl':'gl', 'gm':'gm', 'gn':'gn', 'gp':'gp', 'gq':'gq', 'gr':'gr', 'gs':'gs', 'gt':'gt', 'gu':'gu', 'gw':'gw', 'gy':'gy', 'hk':'hk', 'hm':'hm', 'hn':'hn', 'hr':'hr', 'ht':'ht', 'hu':'hu', 'id':'id', 'ie':'ie', 'il':'il', 'in':'in_', 'io':'io', 'iq':'iq', 'ir':'ir', 'is':'is_', 
'it':'it', 'jm':'jm', 'jo':'jo', 'jp':'jp', 'ke':'ke', 'kg':'kg', 'kh':'kh', 'ki':'ki', 'km':'km', 'kn':'kn', 'kp':'kp', 'kr':'kr', 'kw':'kw', 'ky':'ky', 'kz':'kz', 'la':'la', 'lb':'lb', 'lc':'lc', 'li':'li', 'lk':'lk', 'lr':'lr', 'ls':'ls', 'lt':'lt', 'lu':'lu', 'lv':'lv', 'ly':'ly', 'ma':'ma', 'mc':'mc', 'md':'md', 'mg':'mg', 'mh':'mh', 'mk':'mk', 'ml':'ml', 'mm':'mm', 'mn':'mn', 'mo':'mo', 'mp':'mp', 'mq':'mq', 'mr':'mr', 'ms':'ms', 'mt':'mt', 'mu':'mu', 'mv':'mv', 'mw':'mw', 'mx':'mx', 'my':'my', 'mz':'mz', 'na':'na', 'nc':'nc', 'ne':'ne', 'nf':'nf', 'ng':'ng', 'ni':'ni', 'nl':'nl', 'no':'no', 'np':'np', 'nr':'nr', 'nu':'nu', 'nz':'nz', 'om':'om', 'pa':'pa', 'pe':'pe', 'pf':'pf', 'pg':'pg', 'ph':'ph', 'pk':'pk', 'pl':'pl', 'pm':'pm', 'pn':'pn', 'pr':'pr', 'pt':'pt', 'pw':'pw', 'py':'py', 'qa':'qa', 're':'re', 'ro':'ro', 'ru':'ru', 'rw':'rw', 'sa':'sa', 'sb':'sb', 'sc':'sc', 'sd':'sd', 'se':'se', 'sg':'sg', 'sh':'sh', 'si':'si', 'sj':'sj', 'sk':'sk', 'sl':'sl', 'sm':'sm', 'sn':'sn', 'so':'so', 'sr':'sr', 'st':'st', 'sv':'sv', 'sy':'sy', 'sz':'sz', 'tc':'tc', 'td':'td', 'tf':'tf', 'tg':'tg', 'th':'th', 'tj':'tj', 'tk':'tk', 'tm':'tm', 'tn':'tn', 'to':'to', 'tp':'tp', 'tr':'tr', 'tt':'tt', 'tv':'tv', 'tw':'tw', 'tz':'tz', 'ua':'ua', 'ug':'ug', 'um':'um', 'us':'us', 'uy':'uy', 'uz':'uz', 'va':'va', 'vc':'vc', 've':'ve', 'vg':'vg', 'vi':'vi', 'vn':'vn', 'vu':'vu', 'wf':'wf', 'ws':'ws', 'ye':'ye', 'yt':'yt', 'yu':'yu', 'za':'za', 'zm':'zm', 'zw':'zw', }, 'Cisco-IOS-XR-infra-infra-locale-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-infra-locale-cfg']), 'Locale' : { 'meta_info' : _MetaInfoClass('Locale', False, [ _MetaInfoClassMember('country', REFERENCE_ENUM_CLASS, 'LocaleCountryEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg', 'LocaleCountryEnum', [], [], ''' Name of country locale ''', 'country', 'Cisco-IOS-XR-infra-infra-locale-cfg', False), _MetaInfoClassMember('language', REFERENCE_ENUM_CLASS, 'LocaleLanguageEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg', 'LocaleLanguageEnum', [], [], ''' Name of language locale ''', 'language', 'Cisco-IOS-XR-infra-infra-locale-cfg', False), ], 'Cisco-IOS-XR-infra-infra-locale-cfg', 'locale', _yang_ns._namespaces['Cisco-IOS-XR-infra-infra-locale-cfg'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_infra_locale_cfg' ), }, }
apache-2.0
-6,472,914,570,276,750,000
24.56872
197
0.260519
false
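The generated meta table above maps YANG enum literals to Python-safe names, appending an underscore when the literal collides with a Python keyword ('as' -> 'as_', 'is' -> 'is_', 'in' -> 'in_', 'or' -> 'or_'). The sketch below only illustrates that escaping convention with the standard keyword module; it is not ydk-py's actual generator code.

# Minimal sketch of keyword-safe identifier mapping, as seen in the table above.
import keyword

def python_safe(identifier):
    # Append an underscore when the YANG name collides with a Python keyword.
    return identifier + "_" if keyword.iskeyword(identifier) else identifier

if __name__ == "__main__":
    for literal in ("aa", "as", "is", "in", "or", "zh"):
        print("%r -> %r" % (literal, python_safe(literal)))
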
HaebinShin/tensorflow
tensorflow/contrib/losses/python/losses/loss_ops_test.py
1
34080
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for contrib.losses.python.losses.loss_ops.""" # pylint: disable=unused-import,g-bad-import-order from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: enable=unused-import import numpy as np import tensorflow as tf class AbsoluteDifferenceLossTest(tf.test.TestCase): def setUp(self): self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3)) self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) def testValueErrorThrownWhenWeightIsNone(self): with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.absolute_difference( self._predictions, self._predictions, weight=None) def testAllCorrectNoLossWeight(self): loss = tf.contrib.losses.absolute_difference( self._predictions, self._predictions) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) def testNonZeroLoss(self): loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets) with self.test_session(): self.assertAlmostEqual(5.5, loss.eval(), 3) def testNonZeroLossWithPythonScalarWeight(self): weight = 2.3 loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(5.5 * weight, loss.eval(), 3) def testNonZeroLossWithScalarTensorWeight(self): weight = 2.3 loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, tf.constant(weight)) with self.test_session(): self.assertAlmostEqual(5.5 * weight, loss.eval(), 3) def testNonZeroLossWithOneDimBatchSpecificWeights(self): weight = tf.constant([1.2, 0.0], shape=[2,]) loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(5.6, loss.eval(), 3) def testNonZeroLossWithTwoDimBatchSpecificWeights(self): weight = tf.constant([1.2, 0.0], shape=[2, 1]) loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(5.6, loss.eval(), 3) def testNonZeroLossWithSampleSpecificWeights(self): weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3]) loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(16.6, loss.eval(), 3) def testNonZeroLossWithSampleSpecificWeightsMostZero(self): weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3]) loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(6.0, loss.eval(), 3) def testLossWithSampleSpecificWeightsAllZero(self): weight = tf.zeros((2, 3)) loss = tf.contrib.losses.absolute_difference( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) class 
SoftmaxCrossEntropyLossTest(tf.test.TestCase): def testNoneWeightRaisesValueError(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.softmax_cross_entropy(logits, labels, weight=None) def testAllCorrect(self): with self.test_session(): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) loss = tf.contrib.losses.softmax_cross_entropy(logits, labels) self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value') self.assertAlmostEqual(loss.eval(), 0.0, 3) def testAllWrong(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) with self.test_session(): loss = tf.contrib.losses.softmax_cross_entropy(logits, labels) self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value') self.assertAlmostEqual(loss.eval(), 10.0, 3) def testNonZeroLossWithPythonScalarWeight(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) weight = 2.3 with self.test_session(): loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight) self.assertAlmostEqual(loss.eval(), weight * 10.0, 3) def testNonZeroLossWithScalarTensorWeight(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) weight = 2.3 with self.test_session(): loss = tf.contrib.losses.softmax_cross_entropy( logits, labels, tf.constant(weight)) self.assertAlmostEqual(loss.eval(), weight * 10.0, 3) def testNonZeroLossWithOneDimBatchSpecificWeights(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) weight = tf.constant([1.2, 3.4, 5.6], shape=[3]) with self.test_session(): loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight) self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3) def testAllWrongAllMissing(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) weight = tf.constant([0, 0, 0], shape=[3]) with self.test_session(): loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight) self.assertAlmostEqual(loss.eval(), 0.0, 3) def testSomeMissing(self): logits = tf.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) weight = tf.constant([1.2, 0, 0], shape=[3]) with self.test_session(): loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight) self.assertAlmostEqual(loss.eval(), 12.0, 3) def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self): with self.test_session(): logits = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0], [-100.0, -100.0, 100.0]]) labels = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) weight = tf.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]]) with self.assertRaises(ValueError): tf.contrib.losses.softmax_cross_entropy( logits, labels, weight=weight).eval() def testSoftmaxLabelSmoothing(self): with self.test_session(): # Softmax Cross Entropy Loss is: # -\sum_i p_i \log q_i # where for a softmax activation # \log q_i = x_i - \log \sum_j \exp x_j # = x_i - x_max - \log \sum_j \exp (x_j - x_max) # For our 
activations, [100, -100, -100] the log partion function becomes # \log ( exp(0) + exp(-200) + exp(-200) ) = 0 # so our log softmaxes become: [0, -200, -200] # so our cross entropy loss is: # -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n logits = tf.constant([[100.0, -100.0, -100.0]]) labels = tf.constant([[1, 0, 0]]) label_smoothing = 0.1 loss = tf.contrib.losses.softmax_cross_entropy( logits, labels, label_smoothing=label_smoothing) self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value') expected_value = 400.0 * label_smoothing / 3.0 self.assertAlmostEqual(loss.eval(), expected_value, 3) class SigmoidCrossEntropyLossTest(tf.test.TestCase): def testAllCorrectSigmoid(self): with self.test_session(): logits = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0], [-100.0, -100.0, 100.0]]) labels = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels) self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value') self.assertAlmostEqual(loss.eval(), 0.0, 3) def testAllWrongSigmoid(self): with self.test_session(): logits = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0], [-100.0, -100.0, 100.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels) self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value') self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3) def testAllWrongSigmoidWithMeasurementSpecificWeights(self): with self.test_session(): logits = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0], [-100.0, -100.0, 100.0]]) labels = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) weight = tf.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]]) loss = tf.contrib.losses.sigmoid_cross_entropy( logits, labels, weight=weight) self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value') self.assertAlmostEqual(loss.eval(), 1700.0 / 7.0, 3) def testMultiCorrectSigmoid(self): logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0], [-100.0, 100.0, 100.0]]) labels = tf.constant([[1, 0, 1], [1, 1, 0], [0, 1, 1]]) loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels) self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value') with self.test_session(): self.assertAlmostEqual(loss.eval(), 0.0, 3) def testSigmoidLabelSmoothingCorrect(self): with self.test_session(): logits = tf.constant([[100.0, -100.0, -100.0]]) labels = tf.constant([[1, 0, 1]]) # Sigmoid cross entropy loss is: # max(x,0) - x*z + log(1 + exp(-abs(x))) # The new labels are: # z' = z * (1 - L) + 0.5 L # 1 -> 1 - 0.5 L # 0 -> 0.5 L # here we expect: # 1/3 * (100 - 100 * (1 - 0.5 L) + 0 # + 0 + 100 * (0.5 L) + 0 # + 0 + 100 * (1 - 0.5 L) + 0) # = 1/3 * (100 + 50 L) label_smoothing = 0.1 loss = tf.contrib.losses.sigmoid_cross_entropy( logits, labels, label_smoothing=label_smoothing) self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value') expected_value = (100.0 + 50.0 * label_smoothing) / 3.0 self.assertAlmostEqual(loss.eval(), expected_value, 3) def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self): with self.test_session(): label_smoothing = 0.1 sigmoid_logits = tf.constant([[100.0, -100.0, -100.0]]) sigmoid_labels = tf.constant([[1, 0, 1]]) sigmoid_loss = tf.contrib.losses.sigmoid_cross_entropy( sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing) softmax_logits = tf.constant([[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]]) softmax_labels = tf.constant([[0, 1], [1, 0], [0, 1]]) softmax_loss = 
tf.contrib.losses.softmax_cross_entropy( softmax_logits, softmax_labels, label_smoothing=label_smoothing) self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3) class LogLossTest(tf.test.TestCase): def setUp(self): predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3)) targets = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3)) self._np_predictions = predictions self._np_targets = targets epsilon = 1e-7 self._expected_losses = np.multiply( targets, np.log(predictions + epsilon)) + np.multiply( 1 - targets, np.log(1 - predictions + epsilon)) self._predictions = tf.constant(predictions) self._targets = tf.constant(targets) def testValueErrorThrownWhenWeightIsNone(self): with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.log_loss(self._targets, self._targets, weight=None) def testAllCorrectNoLossWeight(self): loss = tf.contrib.losses.log_loss(self._targets, self._targets) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) def testAllCorrectNoLossWeightWithPlaceholder(self): tf_predictions = tf.placeholder(tf.float32, shape=self._np_targets.shape) loss = tf.contrib.losses.log_loss(tf_predictions, self._targets) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(feed_dict={ tf_predictions: self._np_targets}), 3) def testNonZeroLoss(self): loss = tf.contrib.losses.log_loss(self._predictions, self._targets) with self.test_session(): self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0, loss.eval(), 3) def testNonZeroLossWithPythonScalarWeight(self): weight = 2.3 loss = tf.contrib.losses.log_loss( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0, loss.eval(), 3) def testNonZeroLossWithScalarTensorWeight(self): weight = 2.3 loss = tf.contrib.losses.log_loss( self._predictions, self._targets, tf.constant(weight)) with self.test_session(): self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0, loss.eval(), 3) def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self): tf_predictions = tf.placeholder(tf.float32, shape=self._np_predictions.shape) weight = 2.3 loss = tf.contrib.losses.log_loss( tf_predictions, self._targets, tf.constant(weight)) with self.test_session() as sess: loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions}) self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0, loss, 3) def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self): tf_predictions = tf.placeholder(tf.float32, shape=[None, None]) weight = 2.3 loss = tf.contrib.losses.log_loss( tf_predictions, self._targets, tf.constant(weight)) with self.test_session() as sess: loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions}) self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0, loss, 3) def testNonZeroLossWithOneDimBatchSpecificWeights(self): weight = tf.constant([1.2, 3.4], shape=[2]) expected_losses = np.multiply( self._expected_losses, np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))) loss = tf.contrib.losses.log_loss( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3) def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self): weight = tf.constant([1.2, 0], shape=[2]) expected_losses = np.multiply( self._expected_losses, np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3))) loss = tf.contrib.losses.log_loss( self._predictions, 
self._targets, weight) with self.test_session(): self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3) def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self): weight = tf.constant([1.2, 0], shape=[2, 1]) expected_losses = np.multiply( self._expected_losses, np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3))) loss = tf.contrib.losses.log_loss( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3) def testWeightsWithSameNumDimsButWrongShapeThrowsException(self): weight = tf.constant(np.random.normal(size=(2, 4)), shape=[2, 4]) with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.log_loss(self._predictions, self._targets, weight) def testNonZeroLossWithMeasurementSpecificWeights(self): weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3)) expected_losses = np.multiply(self._expected_losses, weight) loss = tf.contrib.losses.log_loss( self._predictions, self._targets, weight=tf.constant(weight, shape=(2, 3))) with self.test_session(): self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3) def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self): weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3)) expected_losses = np.multiply(self._expected_losses, weight) tf_predictions = tf.placeholder(tf.float32, shape=[2, 3]) loss = tf.contrib.losses.log_loss( tf_predictions, self._targets, weight=tf.constant(weight, shape=(2, 3))) with self.test_session() as sess: loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions}) self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3) def testNonZeroLossWithSampleSpecificWeightsMostZero(self): weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3)) expected_losses = np.multiply(self._expected_losses, weight) loss = tf.contrib.losses.log_loss( self._predictions, self._targets, weight=tf.constant(weight, shape=(2, 3))) with self.test_session(): self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3) def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self): weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3)) expected_losses = np.multiply(self._expected_losses, weight) tf_predictions = tf.placeholder(tf.float32, shape=[2, 3]) tf_weight = tf.constant(weight, shape=(2, 3)) loss = tf.contrib.losses.log_loss(tf_predictions, self._targets, tf_weight) with self.test_session() as sess: loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions}) self.assertAlmostEqual(-np.sum(expected_losses), loss, 3) def testLossWithSampleSpecificWeightsAllZero(self): tf_weight = tf.zeros(shape=(2, 3)) loss = tf.contrib.losses.log_loss( self._predictions, self._targets, tf_weight) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) class SumOfSquaresLossTest(tf.test.TestCase): def setUp(self): self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3)) self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3)) def testValueErrorThrownWhenWeightIsNone(self): with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.sum_of_squares( self._predictions, self._predictions, weight=None) def testAllCorrectNoLossWeight(self): loss = tf.contrib.losses.sum_of_squares( self._predictions, self._predictions) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) def testNonZeroLoss(self): loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets) with self.test_session(): 
self.assertAlmostEqual(49.5, loss.eval(), 3) def testNonZeroLossWithPythonScalarWeight(self): weight = 2.3 loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(49.5 * weight, loss.eval(), 3) def testNonZeroLossWithScalarTensorWeight(self): weight = 2.3 loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, tf.constant(weight)) with self.test_session(): self.assertAlmostEqual(49.5 * weight, loss.eval(), 3) def testNonZeroLossWithOneDimBatchSpecificWeights(self): weight = tf.constant([1.2, 3.4], shape=[2,]) loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3) def testNonZeroLossWithTwoDimBatchSpecificWeights(self): weight = tf.constant([1.2, 3.4], shape=[2, 1]) loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3) def testNonZeroLossWithSampleSpecificWeights(self): weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3]) loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(587 / 5.0, loss.eval(), 3) def testNonZeroLossWithSampleSpecificWeightsMostZero(self): weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3]) loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(18.0, loss.eval(), 3) def testLossWithSampleSpecificWeightsAllZero(self): weight = tf.zeros((2, 3)) loss = tf.contrib.losses.sum_of_squares( self._predictions, self._targets, weight) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) class SumOfPairwiseSquaresLossTest(tf.test.TestCase): def setUp(self): self._predictions = np.array([[4, 8, 12], [8, 1, 3]]) self._targets = np.array([[1, 9, 2], [-5, -5, 7]]) batch_size, dims = self._targets.shape # Compute the expected loss 'manually'. 
total = np.zeros((batch_size, 1)) for b in range(batch_size): for i in range(dims): for j in range(dims): x = self._predictions[b, i].item() - self._predictions[b, j].item() y = self._targets[b, i].item() - self._targets[b, j].item() tmp = (x-y) * (x-y) total[b] += tmp self._expected_losses = np.divide(total, 9.0) def testValueErrorThrownWhenWeightIsNone(self): with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._targets), targets=tf.constant(self._targets), weight=None) def testAllCorrectNoLossWeight(self): loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._targets), targets=tf.constant(self._targets)) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) def testNonZeroLoss(self): loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets)) with self.test_session(): self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3) def testGradientWithZeroWeight(self): with tf.Graph().as_default(): tf.set_random_seed(0) inputs = tf.ones((2, 3)) weights = tf.get_variable('weights', shape=[3, 4], initializer=tf.truncated_normal_initializer()) predictions = tf.matmul(inputs, weights) optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9) loss = tf.contrib.losses.sum_of_pairwise_squares( predictions, predictions, 0) gradients_to_variables = optimizer.compute_gradients(loss) init_op = tf.initialize_all_variables() with self.test_session() as sess: sess.run(init_op) for grad, _ in gradients_to_variables: np_grad = sess.run(grad) self.assertFalse(np.isnan(np_grad).any()) def testNonZeroLossWithPythonScalarWeight(self): weight = 2.3 loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), weight=weight) with self.test_session(): self.assertAlmostEqual(weight * np.sum(self._expected_losses), loss.eval(), 3) def testNonZeroLossWithScalarTensorWeight(self): weight = 2.3 loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), weight=tf.constant(weight)) with self.test_session(): self.assertAlmostEqual(weight * np.sum(self._expected_losses), loss.eval(), 3) def testNonZeroLossWithScalarZeroWeight(self): weight = 0 loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), weight=tf.constant(weight)) with self.test_session(): self.assertAlmostEqual(0, loss.eval(), 3) def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self): weight = 2.3 tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape) tf_targets = tf.placeholder(tf.float32, shape=self._targets.shape) loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf_predictions, targets=tf_targets, weight=tf.constant(weight)) with self.test_session() as sess: loss = sess.run(loss, feed_dict={ tf_predictions: self._predictions, tf_targets: self._targets, }) self.assertAlmostEqual(weight * np.sum(self._expected_losses), loss, 3) def testNonZeroLossWithOneDimBatchSpecificWeights(self): weight = np.asarray([2.0, 1.0]).reshape((2, 1)) expected_losses = np.multiply(weight, self._expected_losses) loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), weight=tf.constant(weight, shape=[2])) with self.test_session(): 
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3) def testZeroLossWithOneDimBatchZeroWeights(self): weight = np.asarray([0.0, 0.0]).reshape((2, 1)) loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), weight=tf.constant(weight, shape=[2])) with self.test_session(): self.assertAlmostEqual(0, loss.eval(), 3) def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self): weight = np.asarray([1.2, 3.4]).reshape((2, 1)) expected_losses = np.multiply(weight, self._expected_losses) tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape) tf_targets = tf.placeholder(tf.int32, shape=self._targets.shape) loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf_predictions, targets=tf_targets, weight=tf.constant(weight, shape=[2])) with self.test_session() as sess: loss = sess.run(loss, feed_dict={ tf_predictions: self._predictions, tf_targets: self._targets, }) self.assertAlmostEqual(np.sum(expected_losses), loss, 3) def testLossWithAllZeroBatchSpecificWeights(self): weight = np.zeros((2, 1)) loss = tf.contrib.losses.sum_of_pairwise_squares( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), weight=tf.constant(weight, shape=[2])) with self.test_session(): self.assertAlmostEqual(0.0, loss.eval(), 3) class CosineDistanceLossTest(tf.test.TestCase): def setUp(self): self._predictions = np.asarray([[1, 0, 0], # Batch 1 [0, 0, -1], [1, 0, 0], # Batch 2 [1, 0, 0], [0, 0, -1], # Batch 3 [1, 0, 0]]).reshape((3, 2, 3)) self._targets = np.asarray([[1, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0]]).reshape((3, 2, 3)) def testValueErrorThrownWhenWeightIsNone(self): with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.cosine_distance( predictions=tf.constant(self._targets), targets=tf.constant(self._targets), dim=2, weight=None) def testAllCorrectNoWeights(self): loss = tf.contrib.losses.cosine_distance( predictions=tf.constant(self._targets), targets=tf.constant(self._targets), dim=2) with self.test_session(): self.assertAlmostEqual(0, loss.eval(), 5) def testPartiallyCorrectWithIntegerValues(self): loss = tf.contrib.losses.cosine_distance( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), dim=2) with self.test_session(): self.assertAlmostEqual(1, loss.eval(), 5) def testPartiallyCorrectFloatingPointValues(self): predictions = np.matrix(( '0.819031913261206 0.567041924552012 0.087465312324590;' '-0.665139432070255 -0.739487441769973 -0.103671883216994;' '0.707106781186548 -0.707106781186548 0')) targets = np.matrix(( '0.819031913261206 0.567041924552012 0.087465312324590;' '0.665139432070255 0.739487441769973 0.103671883216994;' '0.707106781186548 0.707106781186548 0')) tf_preds = tf.constant(predictions, shape=(3, 1, 3), dtype=tf.float32) tf_targets = tf.constant(targets, shape=(3, 1, 3), dtype=tf.float32) loss = tf.contrib.losses.cosine_distance(tf_preds, tf_targets, dim=2) with self.test_session(): self.assertAlmostEqual(1.0, loss.eval(), 5) def testSampleSpecificWeights(self): loss = tf.contrib.losses.cosine_distance( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), dim=2, weight=tf.constant([1, 0, 0])) with self.test_session(): self.assertEqual(1.0, loss.eval()) def testMeasurementSpecificWeights(self): loss = tf.contrib.losses.cosine_distance( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), dim=2, weight=tf.constant([1, 
0, 0, 1, 1, 1], shape=(3, 2))) with self.test_session(): self.assertEqual(3.0 / 4.0, loss.eval()) def testValueErrorThrownWithShapelessPlaceholder(self): tf_predictions = tf.placeholder(tf.float32) with self.test_session(): with self.assertRaises(ValueError): tf.contrib.losses.cosine_distance( predictions=tf_predictions, targets=tf.constant(self._targets), dim=2, weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2))) def testMeasurementSpecificWeightsWithPlaceholderWithShape(self): tf_predictions = tf.placeholder(tf.float32, shape=self._targets.shape) loss = tf.contrib.losses.cosine_distance( predictions=tf_predictions, targets=tf.constant(self._targets), dim=2, weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2))) with self.test_session() as sess: loss = sess.run(loss, feed_dict={tf_predictions: self._predictions}) self.assertEqual(3.0 / 4.0, loss) def testZeroLossWhenAllSampleSpecificWeightsAreZero(self): loss = tf.contrib.losses.cosine_distance( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), dim=2, weight=tf.zeros((3,))) with self.test_session(): self.assertEqual(0, loss.eval()) def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self): loss = tf.contrib.losses.cosine_distance( predictions=tf.constant(self._predictions), targets=tf.constant(self._targets), dim=2, weight=tf.zeros((3, 2))) with self.test_session(): self.assertEqual(0, loss.eval()) if __name__ == '__main__': tf.test.main()
apache-2.0
-7,149,207,294,253,049,000
38.813084
80
0.604079
false
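The LogLossTest cases in the record above derive every expected value from the element-wise log loss built in setUp and then divide by the count of measurements that carry a non-zero weight (hence the /6.0, /5.0 and /3.0 divisors in the assertions). A minimal NumPy sketch of that arithmetic, assuming the same 2x3 predictions/targets and the 1e-7 epsilon used in the tests (no TensorFlow required; this only reproduces the expected values, it is not the library's implementation):

import numpy as np

# Same fixtures as LogLossTest.setUp in the record above.
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
targets = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
epsilon = 1e-7

# Element-wise (unreduced) log loss, exactly as computed in setUp.
losses = (targets * np.log(predictions + epsilon)
          + (1 - targets) * np.log(1 - predictions + epsilon))

# Unweighted case: the tests expect -sum(losses) / 6, i.e. the negated mean
# over all six prediction/target pairs.
expected_unweighted = -losses.sum() / 6.0

# Per-row weights [1.2, 0]: only the first row's three entries contribute,
# so the divisor drops to 3 (the count of non-zero weighted measurements).
weights = np.asarray([1.2, 0.0]).reshape((2, 1))
expected_weighted = -(weights * losses).sum() / 3.0

print(expected_unweighted, expected_weighted)

The same divide-by-the-number-of-non-zero-weights pattern is what the weighted variants above encode with their /5.0 and /3.0 expectations; that interpretation is inferred from the test assertions themselves rather than from the loss implementation.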
kevin-intel/scikit-learn
sklearn/feature_extraction/text.py
2
70670
# -*- coding: utf-8 -*- # Authors: Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Lars Buitinck # Robert Layton <[email protected]> # Jochen Wersdörfer <[email protected]> # Roman Sinayev <[email protected]> # # License: BSD 3 clause """ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to build feature vectors from text documents. """ import array from collections import defaultdict from collections.abc import Mapping from functools import partial import numbers from operator import itemgetter import re import unicodedata import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..preprocessing import normalize from ._hash import FeatureHasher from ._stop_words import ENGLISH_STOP_WORDS from ..utils.validation import check_is_fitted, check_array, FLOAT_DTYPES from ..utils import _IS_32BIT from ..utils.fixes import _astype_copy_false from ..exceptions import NotFittedError __all__ = ['HashingVectorizer', 'CountVectorizer', 'ENGLISH_STOP_WORDS', 'TfidfTransformer', 'TfidfVectorizer', 'strip_accents_ascii', 'strip_accents_unicode', 'strip_tags'] def _preprocess(doc, accent_function=None, lower=False): """Chain together an optional series of text preprocessing steps to apply to a document. Parameters ---------- doc: str The string to preprocess accent_function: callable, default=None Function for handling accented characters. Common strategies include normalizing and removing. lower: bool, default=False Whether to use str.lower to lowercase all fo the text Returns ------- doc: str preprocessed string """ if lower: doc = doc.lower() if accent_function is not None: doc = accent_function(doc) return doc def _analyze(doc, analyzer=None, tokenizer=None, ngrams=None, preprocessor=None, decoder=None, stop_words=None): """Chain together an optional series of text processing steps to go from a single document to ngrams, with or without tokenizing or preprocessing. If analyzer is used, only the decoder argument is used, as the analyzer is intended to replace the preprocessor, tokenizer, and ngrams steps. Parameters ---------- analyzer: callable, default=None tokenizer: callable, default=None ngrams: callable, default=None preprocessor: callable, default=None decoder: callable, default=None stop_words: list, default=None Returns ------- ngrams: list A sequence of tokens, possibly with pairs, triples, etc. """ if decoder is not None: doc = decoder(doc) if analyzer is not None: doc = analyzer(doc) else: if preprocessor is not None: doc = preprocessor(doc) if tokenizer is not None: doc = tokenizer(doc) if ngrams is not None: if stop_words is not None: doc = ngrams(doc, stop_words) else: doc = ngrams(doc) return doc def strip_accents_unicode(s): """Transform accentuated unicode symbols into their simple counterpart Warning: the python-level loop and join operations make this implementation 20 times slower than the strip_accents_ascii basic normalization. Parameters ---------- s : string The string to strip See Also -------- strip_accents_ascii : Remove accentuated char for any unicode symbol that has a direct ASCII equivalent. 
""" try: # If `s` is ASCII-compatible, then it does not contain any accented # characters and we can avoid an expensive list comprehension s.encode("ASCII", errors="strict") return s except UnicodeEncodeError: normalized = unicodedata.normalize('NFKD', s) return ''.join([c for c in normalized if not unicodedata.combining(c)]) def strip_accents_ascii(s): """Transform accentuated unicode symbols into ascii or nothing Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. Parameters ---------- s : string The string to strip See Also -------- strip_accents_unicode : Remove accentuated char for any unicode symbol. """ nkfd_form = unicodedata.normalize('NFKD', s) return nkfd_form.encode('ASCII', 'ignore').decode('ASCII') def strip_tags(s): """Basic regexp based HTML / XML tag stripper function For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. Parameters ---------- s : string The string to strip """ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) def _check_stop_list(stop): if stop == "english": return ENGLISH_STOP_WORDS elif isinstance(stop, str): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None else: # assume it's a collection return frozenset(stop) class _VectorizerMixin: """Provides common code for text vectorizers (tokenization logic).""" _white_spaces = re.compile(r"\s\s+") def decode(self, doc): """Decode the input into a string of unicode symbols. The decoding strategy depends on the vectorizer parameters. Parameters ---------- doc : str The string to decode. Returns ------- doc: str A string of unicode symbols. """ if self.input == 'filename': with open(doc, 'rb') as fh: doc = fh.read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if doc is np.nan: raise ValueError("np.nan is an invalid document, expected byte or " "unicode string.") return doc def _word_ngrams(self, tokens, stop_words=None): """Turn tokens into a sequence of n-grams after stop words filtering""" # handle stop words if stop_words is not None: tokens = [w for w in tokens if w not in stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens if min_n == 1: # no need to do any slicing for unigrams # just iterate through the original tokens tokens = list(original_tokens) min_n += 1 else: tokens = [] n_original_tokens = len(original_tokens) # bind method outside of loop to reduce overhead tokens_append = tokens.append space_join = " ".join for n in range(min_n, min(max_n + 1, n_original_tokens + 1)): for i in range(n_original_tokens - n + 1): tokens_append(space_join(original_tokens[i: i + n])) return tokens def _char_ngrams(self, text_document): """Tokenize text_document into a sequence of character n-grams""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) text_len = len(text_document) min_n, max_n = self.ngram_range if min_n == 1: # no need to do any slicing for unigrams # iterate through the string ngrams = list(text_document) min_n += 1 else: ngrams = [] # bind method outside of loop to reduce overhead ngrams_append = ngrams.append for n in range(min_n, min(max_n + 1, text_len + 1)): for i in range(text_len - n + 1): ngrams_append(text_document[i: i + n]) return ngrams def _char_wb_ngrams(self, text_document): """Whitespace sensitive char-n-gram tokenization. 
Tokenize text_document into a sequence of character n-grams operating only inside word boundaries. n-grams at the edges of words are padded with space.""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) min_n, max_n = self.ngram_range ngrams = [] # bind method outside of loop to reduce overhead ngrams_append = ngrams.append for w in text_document.split(): w = ' ' + w + ' ' w_len = len(w) for n in range(min_n, max_n + 1): offset = 0 ngrams_append(w[offset:offset + n]) while offset + n < w_len: offset += 1 ngrams_append(w[offset:offset + n]) if offset == 0: # count a short word (w_len < n) only once break return ngrams def build_preprocessor(self): """Return a function to preprocess the text before tokenization. Returns ------- preprocessor: callable A function to preprocess the text before tokenization. """ if self.preprocessor is not None: return self.preprocessor # accent stripping if not self.strip_accents: strip_accents = None elif callable(self.strip_accents): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) return partial( _preprocess, accent_function=strip_accents, lower=self.lowercase ) def build_tokenizer(self): """Return a function that splits a string into a sequence of tokens. Returns ------- tokenizer: callable A function to split a string into a sequence of tokens. """ if self.tokenizer is not None: return self.tokenizer token_pattern = re.compile(self.token_pattern) if token_pattern.groups > 1: raise ValueError( "More than 1 capturing group in token pattern. Only a single " "group should be captured." ) return token_pattern.findall def get_stop_words(self): """Build or fetch the effective stop words list. Returns ------- stop_words: list or None A list of stop words. """ return _check_stop_list(self.stop_words) def _check_stop_words_consistency(self, stop_words, preprocess, tokenize): """Check if stop words are consistent Returns ------- is_consistent : True if stop words are consistent with the preprocessor and tokenizer, False if they are not, None if the check was previously performed, "error" if it could not be performed (e.g. because of the use of a custom preprocessor / tokenizer) """ if id(self.stop_words) == getattr(self, '_stop_words_id', None): # Stop words are were previously validated return None # NB: stop_words is validated, unlike self.stop_words try: inconsistent = set() for w in stop_words or (): tokens = list(tokenize(preprocess(w))) for token in tokens: if token not in stop_words: inconsistent.add(token) self._stop_words_id = id(self.stop_words) if inconsistent: warnings.warn('Your stop_words may be inconsistent with ' 'your preprocessing. Tokenizing the stop ' 'words generated tokens %r not in ' 'stop_words.' % sorted(inconsistent)) return not inconsistent except Exception: # Failed to check stop words consistency (e.g. because a custom # preprocessor or tokenizer was used) self._stop_words_id = id(self.stop_words) return 'error' def build_analyzer(self): """Return a callable that handles preprocessing, tokenization and n-grams generation. Returns ------- analyzer: callable A function to handle preprocessing, tokenization and n-grams generation. 
""" if callable(self.analyzer): return partial( _analyze, analyzer=self.analyzer, decoder=self.decode ) preprocess = self.build_preprocessor() if self.analyzer == 'char': return partial(_analyze, ngrams=self._char_ngrams, preprocessor=preprocess, decoder=self.decode) elif self.analyzer == 'char_wb': return partial(_analyze, ngrams=self._char_wb_ngrams, preprocessor=preprocess, decoder=self.decode) elif self.analyzer == 'word': stop_words = self.get_stop_words() tokenize = self.build_tokenizer() self._check_stop_words_consistency(stop_words, preprocess, tokenize) return partial(_analyze, ngrams=self._word_ngrams, tokenizer=tokenize, preprocessor=preprocess, decoder=self.decode, stop_words=stop_words) else: raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer) def _validate_vocabulary(self): vocabulary = self.vocabulary if vocabulary is not None: if isinstance(vocabulary, set): vocabulary = sorted(vocabulary) if not isinstance(vocabulary, Mapping): vocab = {} for i, t in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = "Duplicate term in vocabulary: %r" % t raise ValueError(msg) vocabulary = vocab else: indices = set(vocabulary.values()) if len(indices) != len(vocabulary): raise ValueError("Vocabulary contains repeated indices.") for i in range(len(vocabulary)): if i not in indices: msg = ("Vocabulary of size %d doesn't contain index " "%d." % (len(vocabulary), i)) raise ValueError(msg) if not vocabulary: raise ValueError("empty vocabulary passed to fit") self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False def _check_vocabulary(self): """Check if vocabulary is empty or missing (not fitted)""" if not hasattr(self, 'vocabulary_'): self._validate_vocabulary() if not self.fixed_vocabulary_: raise NotFittedError("Vocabulary not fitted or provided") if len(self.vocabulary_) == 0: raise ValueError("Vocabulary is empty") def _validate_params(self): """Check validity of ngram_range parameter""" min_n, max_m = self.ngram_range if min_n > max_m: raise ValueError( "Invalid value for ngram_range=%s " "lower boundary larger than the upper boundary." 
% str(self.ngram_range)) def _warn_for_unused_params(self): if self.tokenizer is not None and self.token_pattern is not None: warnings.warn("The parameter 'token_pattern' will not be used" " since 'tokenizer' is not None'") if self.preprocessor is not None and callable(self.analyzer): warnings.warn("The parameter 'preprocessor' will not be used" " since 'analyzer' is callable'") if (self.ngram_range != (1, 1) and self.ngram_range is not None and callable(self.analyzer)): warnings.warn("The parameter 'ngram_range' will not be used" " since 'analyzer' is callable'") if self.analyzer != 'word' or callable(self.analyzer): if self.stop_words is not None: warnings.warn("The parameter 'stop_words' will not be used" " since 'analyzer' != 'word'") if self.token_pattern is not None and \ self.token_pattern != r"(?u)\b\w\w+\b": warnings.warn("The parameter 'token_pattern' will not be used" " since 'analyzer' != 'word'") if self.tokenizer is not None: warnings.warn("The parameter 'tokenizer' will not be used" " since 'analyzer' != 'word'") class HashingVectorizer(TransformerMixin, _VectorizerMixin, BaseEstimator): r"""Convert a collection of text documents to a matrix of token occurrences It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : {'filename', 'file', 'content'}, default='content' - If `'filename'`, the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. - If `'file'`, the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. - If `'content'`, the input is expected to be a sequence of items that can be of type string or byte. encoding : string, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. 
strip_accents : {'ascii', 'unicode'}, default=None Remove accents and perform other character normalization during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. Both 'ascii' and 'unicode' use NFKD normalization from :func:`unicodedata.normalize`. lowercase : bool, default=True Convert all characters to lowercase before tokenizing. preprocessor : callable, default=None Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. Only applies if ``analyzer is not callable``. tokenizer : callable, default=None Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. stop_words : {'english'}, list, default=None If 'english', a built-in stop word list for English is used. There are several known issues with 'english' and you should consider an alternative (see :ref:`stop_words`). If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. token_pattern : str, default=r"(?u)\\b\\w\\w+\\b" Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). If there is a capturing group in token_pattern then the captured group content, not the entire match, becomes the token. At most one capturing group is permitted. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. Only applies if ``analyzer is not callable``. analyzer : {'word', 'char', 'char_wb'} or callable, default='word' Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. .. versionchanged:: 0.21 Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data is first read from the file and then passed to the given callable analyzer. n_features : int, default=(2 ** 20) The number of features (columns) in the output matrices. Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. binary : bool, default=False. If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. norm : {'l1', 'l2'}, default='l2' Norm used to normalize term vectors. None for no normalization. alternate_sign : bool, default=True When True, an alternating sign is added to the features as to approximately conserve the inner product in the hashed space even for small n_features. This approach is similar to sparse random projection. .. versionadded:: 0.19 dtype : type, default=np.float64 Type of the matrix returned by fit_transform() or transform(). 
Examples -------- >>> from sklearn.feature_extraction.text import HashingVectorizer >>> corpus = [ ... 'This is the first document.', ... 'This document is the second document.', ... 'And this is the third one.', ... 'Is this the first document?', ... ] >>> vectorizer = HashingVectorizer(n_features=2**4) >>> X = vectorizer.fit_transform(corpus) >>> print(X.shape) (4, 16) See Also -------- CountVectorizer, TfidfVectorizer """ def __init__(self, *, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20), binary=False, norm='l2', alternate_sign=True, dtype=np.float64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.alternate_sign = alternate_sign self.dtype = dtype def partial_fit(self, X, y=None): """Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. """ return self def fit(self, X, y=None): """Does nothing: this transformer is stateless. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. """ # triggers a parameter validation if isinstance(X, str): raise ValueError( "Iterable over raw text documents expected, " "string object received.") self._warn_for_unused_params() self._validate_params() self._get_hasher().fit(X, y=y) return self def transform(self, X): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix. """ if isinstance(X, str): raise ValueError( "Iterable over raw text documents expected, " "string object received.") self._validate_params() analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X def fit_transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix. 
""" return self.fit(X, y).transform(X) def _get_hasher(self): return FeatureHasher(n_features=self.n_features, input_type='string', dtype=self.dtype, alternate_sign=self.alternate_sign) def _more_tags(self): return {'X_types': ['string']} def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.isspmatrix_csr(X): return np.bincount(X.indices, minlength=X.shape[1]) else: return np.diff(X.indptr) class CountVectorizer(_VectorizerMixin, BaseEstimator): r"""Convert a collection of text documents to a matrix of token counts This implementation produces a sparse representation of the counts using scipy.sparse.csr_matrix. If you do not provide an a-priori dictionary and you do not use an analyzer that does some kind of feature selection then the number of features will be equal to the vocabulary size found by analyzing the data. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : {'filename', 'file', 'content'}, default='content' - If `'filename'`, the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. - If `'file'`, the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. - If `'content'`, the input is expected to be a sequence of items that can be of type string or byte. encoding : string, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode'}, default=None Remove accents and perform other character normalization during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. Both 'ascii' and 'unicode' use NFKD normalization from :func:`unicodedata.normalize`. lowercase : bool, default=True Convert all characters to lowercase before tokenizing. preprocessor : callable, default=None Override the preprocessing (strip_accents and lowercase) stage while preserving the tokenizing and n-grams generation steps. Only applies if ``analyzer is not callable``. tokenizer : callable, default=None Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. stop_words : {'english'}, list, default=None If 'english', a built-in stop word list for English is used. There are several known issues with 'english' and you should consider an alternative (see :ref:`stop_words`). If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. token_pattern : str, default=r"(?u)\\b\\w\\w+\\b" Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). 
If there is a capturing group in token_pattern then the captured group content, not the entire match, becomes the token. At most one capturing group is permitted. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different word n-grams or char n-grams to be extracted. All values of n such such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. Only applies if ``analyzer is not callable``. analyzer : {'word', 'char', 'char_wb'} or callable, default='word' Whether the feature should be made of word n-gram or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. .. versionchanged:: 0.21 Since v0.21, if ``input`` is ``filename`` or ``file``, the data is first read from the file and then passed to the given callable analyzer. max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, default=None Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. Indices in the mapping should not be repeated and should not have any gap between 0 and the largest index. binary : bool, default=False If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, default=np.int64 Type of the matrix returned by fit_transform() or transform(). Attributes ---------- vocabulary_ : dict A mapping of terms to feature indices. fixed_vocabulary_ : bool True if a fixed vocabulary of term to indices mapping is provided by the user. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. Examples -------- >>> from sklearn.feature_extraction.text import CountVectorizer >>> corpus = [ ... 'This is the first document.', ... 'This document is the second document.', ... 'And this is the third one.', ... 'Is this the first document?', ... 
] >>> vectorizer = CountVectorizer() >>> X = vectorizer.fit_transform(corpus) >>> print(vectorizer.get_feature_names()) ['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this'] >>> print(X.toarray()) [[0 1 1 1 0 0 1 0 1] [0 2 0 1 0 1 1 0 1] [1 0 0 1 1 0 1 1 1] [0 1 1 1 0 0 1 0 1]] >>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2)) >>> X2 = vectorizer2.fit_transform(corpus) >>> print(vectorizer2.get_feature_names()) ['and this', 'document is', 'first document', 'is the', 'is this', 'second document', 'the first', 'the second', 'the third', 'third one', 'this document', 'this is', 'this the'] >>> print(X2.toarray()) [[0 0 1 1 0 0 1 0 0 0 0 1 0] [0 1 0 1 0 1 0 1 0 0 1 0 0] [1 0 0 1 0 0 0 0 1 1 0 1 0] [0 0 1 0 1 0 1 0 0 0 0 0 1]] See Also -------- HashingVectorizer, TfidfVectorizer Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. """ def __init__(self, *, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df or min_df") self.max_features = max_features if max_features is not None: if (not isinstance(max_features, numbers.Integral) or max_features <= 0): raise ValueError( "max_features=%r, neither a positive integer nor None" % max_features) self.ngram_range = ngram_range self.vocabulary = vocabulary self.binary = binary self.dtype = dtype def _sort_features(self, X, vocabulary): """Sort features by name Returns a reordered matrix and modifies the vocabulary in place """ sorted_features = sorted(vocabulary.items()) map_index = np.empty(len(sorted_features), dtype=X.indices.dtype) for new_val, (term, old_val) in enumerate(sorted_features): vocabulary[term] = new_val map_index[old_val] = new_val X.indices = map_index.take(X.indices, mode='clip') return X def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features. 
""" if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: tfs = np.asarray(X.sum(axis=0)).ravel() mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(vocabulary.items()): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def _count_vocab(self, raw_documents, fixed_vocab): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ if fixed_vocab: vocabulary = self.vocabulary_ else: # Add a new value when a new vocabulary item is seen vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = [] indptr = [] if self.lowercase: for vocab in vocabulary: if any(map(str.isupper, vocab)): warnings.warn("Upper case characters found in" " vocabulary while 'lowercase'" " is True. These entries will not" " be matched with any documents") break values = _make_int_array() indptr.append(0) for doc in raw_documents: feature_counter = {} for feature in analyze(doc): try: feature_idx = vocabulary[feature] if feature_idx not in feature_counter: feature_counter[feature_idx] = 1 else: feature_counter[feature_idx] += 1 except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue j_indices.extend(feature_counter.keys()) values.extend(feature_counter.values()) indptr.append(len(j_indices)) if not fixed_vocab: # disable defaultdict behaviour vocabulary = dict(vocabulary) if not vocabulary: raise ValueError("empty vocabulary; perhaps the documents only" " contain stop words") if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1 if _IS_32BIT: raise ValueError(('sparse CSR array has {} non-zero ' 'elements and requires 64 bit indexing, ' 'which is unsupported with 32 bit Python.') .format(indptr[-1])) indices_dtype = np.int64 else: indices_dtype = np.int32 j_indices = np.asarray(j_indices, dtype=indices_dtype) indptr = np.asarray(indptr, dtype=indices_dtype) values = np.frombuffer(values, dtype=np.intc) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sort_indices() return vocabulary, X def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- self """ self._warn_for_unused_params() self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return document-term matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : array of shape (n_samples, n_features) Document-term matrix. 
""" # We intentionally don't call the transform method to make # fit_transform overridable without unwanted side effects in # TfidfVectorizer. if isinstance(raw_documents, str): raise ValueError( "Iterable over raw text documents expected, " "string object received.") self._validate_params() self._validate_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if not self.fixed_vocabulary_: n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") if max_features is not None: X = self._sort_features(X, vocabulary) X, self.stop_words_ = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) if max_features is None: X = self._sort_features(X, vocabulary) self.vocabulary_ = vocabulary return X def transform(self, raw_documents): """Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix. """ if isinstance(raw_documents, str): raise ValueError( "Iterable over raw text documents expected, " "string object received.") self._check_vocabulary() # use the same matrix-building strategy as fit_transform _, X = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X def inverse_transform(self, X): """Return terms per document with nonzero entries in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document-term matrix. Returns ------- X_inv : list of arrays of shape (n_samples,) List of arrays of terms. """ self._check_vocabulary() # We need CSR format for fast row manipulations. X = check_array(X, accept_sparse='csr') n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] if sp.issparse(X): return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)] else: return [inverse_vocabulary[np.flatnonzero(X[i, :])].ravel() for i in range(n_samples)] def get_feature_names(self): """Array mapping from feature integer indices to feature name. Returns ------- feature_names : list A list of feature names. """ self._check_vocabulary() return [t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))] def _more_tags(self): return {'X_types': ['string']} def _make_int_array(): """Construct an array.array of a type suitable for scipy.sparse indices.""" return array.array(str("i")) class TfidfTransformer(TransformerMixin, BaseEstimator): """Transform a count matrix to a normalized tf or tf-idf representation Tf means term-frequency while tf-idf means term-frequency times inverse document-frequency. This is a common term weighting scheme in information retrieval, that has also found good use in document classification. 
The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus. The formula that is used to compute the tf-idf for a term t of a document d in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where n is the total number of documents in the document set and df(t) is the document frequency of t; the document frequency is the number of documents in the document set that contain the term t. The effect of adding "1" to the idf in the equation above is that terms with zero idf, i.e., terms that occur in all documents in a training set, will not be entirely ignored. (Note that the idf formula above differs from the standard textbook notation that defines the idf as idf(t) = log [ n / (df(t) + 1) ]). If ``smooth_idf=True`` (the default), the constant "1" is added to the numerator and denominator of the idf as if an extra document was seen containing every term in the collection exactly once, which prevents zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1. Furthermore, the formulas used to compute tf and idf depend on parameter settings that correspond to the SMART notation used in IR as follows: Tf is "n" (natural) by default, "l" (logarithmic) when ``sublinear_tf=True``. Idf is "t" when use_idf is given, "n" (none) otherwise. Normalization is "c" (cosine) when ``norm='l2'``, "n" (none) when ``norm=None``. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- norm : {'l1', 'l2'}, default='l2' Each output row will have unit norm, either: * 'l2': Sum of squares of vector elements is 1. The cosine similarity between two vectors is their dot product when l2 norm has been applied. * 'l1': Sum of absolute values of vector elements is 1. See :func:`preprocessing.normalize` use_idf : bool, default=True Enable inverse-document-frequency reweighting. smooth_idf : bool, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : bool, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- idf_ : array of shape (n_features) The inverse document frequency (IDF) vector; only defined if ``use_idf`` is True. .. versionadded:: 0.20 n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 1.0 Examples -------- >>> from sklearn.feature_extraction.text import TfidfTransformer >>> from sklearn.feature_extraction.text import CountVectorizer >>> from sklearn.pipeline import Pipeline >>> import numpy as np >>> corpus = ['this is the first document', ... 'this document is the second document', ... 'and this is the third one', ... 'is this the first document'] >>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the', ... 'and', 'one'] >>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)), ... ('tfid', TfidfTransformer())]).fit(corpus) >>> pipe['count'].transform(corpus).toarray() array([[1, 1, 1, 1, 0, 1, 0, 0], [1, 2, 0, 1, 1, 1, 0, 0], [1, 0, 0, 1, 0, 1, 1, 1], [1, 1, 1, 1, 0, 1, 0, 0]]) >>> pipe['tfid'].idf_ array([1. , 1.22314355, 1.51082562, 1. , 1.91629073, 1. 
, 1.91629073, 1.91629073]) >>> pipe.transform(corpus).shape (4, 8) References ---------- .. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 68-74. .. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 118-120. """ def __init__(self, *, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): self.norm = norm self.use_idf = use_idf self.smooth_idf = smooth_idf self.sublinear_tf = sublinear_tf def fit(self, X, y=None): """Learn the idf vector (global term weights). Parameters ---------- X : sparse matrix of shape n_samples, n_features) A matrix of term/token counts. """ X = self._validate_data(X, accept_sparse=('csr', 'csc')) if not sp.issparse(X): X = sp.csr_matrix(X) dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64 if self.use_idf: n_samples, n_features = X.shape df = _document_frequency(X) df = df.astype(dtype, **_astype_copy_false(df)) # perform idf smoothing if required df += int(self.smooth_idf) n_samples += int(self.smooth_idf) # log+1 instead of log makes sure terms with zero idf don't get # suppressed entirely. idf = np.log(n_samples / df) + 1 self._idf_diag = sp.diags(idf, offsets=0, shape=(n_features, n_features), format='csr', dtype=dtype) return self def transform(self, X, copy=True): """Transform a count matrix to a tf or tf-idf representation Parameters ---------- X : sparse matrix of (n_samples, n_features) a matrix of term/token counts copy : bool, default=True Whether to copy X and operate on the copy or perform in-place operations. Returns ------- vectors : sparse matrix of shape (n_samples, n_features) """ X = self._validate_data(X, accept_sparse='csr', dtype=FLOAT_DTYPES, copy=copy, reset=False) if not sp.issparse(X): X = sp.csr_matrix(X, dtype=np.float64) n_samples, n_features = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: # idf_ being a property, the automatic attributes detection # does not work as usual and we need to specify the attribute # name: check_is_fitted(self, attributes=["idf_"], msg='idf vector is not fitted') # *= doesn't work X = X * self._idf_diag if self.norm: X = normalize(X, norm=self.norm, copy=False) return X @property def idf_(self): # if _idf_diag is not set, this will raise an attribute error, # which means hasattr(self, "idf_") is False return np.ravel(self._idf_diag.sum(axis=0)) @idf_.setter def idf_(self, value): value = np.asarray(value, dtype=np.float64) n_features = value.shape[0] self._idf_diag = sp.spdiags(value, diags=0, m=n_features, n=n_features, format='csr') def _more_tags(self): return {'X_types': 'sparse'} class TfidfVectorizer(CountVectorizer): r"""Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to :class:`CountVectorizer` followed by :class:`TfidfTransformer`. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : {'filename', 'file', 'content'}, default='content' - If `'filename'`, the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. - If `'file'`, the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. - If `'content'`, the input is expected to be a sequence of items that can be of type string or byte. encoding : str, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. 
decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode'}, default=None Remove accents and perform other character normalization during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. Both 'ascii' and 'unicode' use NFKD normalization from :func:`unicodedata.normalize`. lowercase : bool, default=True Convert all characters to lowercase before tokenizing. preprocessor : callable, default=None Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. Only applies if ``analyzer is not callable``. tokenizer : callable, default=None Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. analyzer : {'word', 'char', 'char_wb'} or callable, default='word' Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. .. versionchanged:: 0.21 Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data is first read from the file and then passed to the given callable analyzer. stop_words : {'english'}, list, default=None If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. There are several known issues with 'english' and you should consider an alternative (see :ref:`stop_words`). If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. token_pattern : str, default=r"(?u)\\b\\w\\w+\\b" Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). If there is a capturing group in token_pattern then the captured group content, not the entire match, becomes the token. At most one capturing group is permitted. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. Only applies if ``analyzer is not callable``. max_df : float or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float in range [0.0, 1.0], the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. 
min_df : float or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float in range of [0.0, 1.0], the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, default=None Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. binary : bool, default=False If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set idf and normalization to False to get 0/1 outputs). dtype : dtype, default=float64 Type of the matrix returned by fit_transform() or transform(). norm : {'l1', 'l2'}, default='l2' Each output row will have unit norm, either: * 'l2': Sum of squares of vector elements is 1. The cosine similarity between two vectors is their dot product when l2 norm has been applied. * 'l1': Sum of absolute values of vector elements is 1. See :func:`preprocessing.normalize`. use_idf : bool, default=True Enable inverse-document-frequency reweighting. smooth_idf : bool, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : bool, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- vocabulary_ : dict A mapping of terms to feature indices. fixed_vocabulary_ : bool True if a fixed vocabulary of term to indices mapping is provided by the user. idf_ : array of shape (n_features,) The inverse document frequency (IDF) vector; only defined if ``use_idf`` is True. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See Also -------- CountVectorizer : Transforms text into a sparse matrix of n-gram counts. TfidfTransformer : Performs the TF-IDF transformation from a provided matrix of counts. Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. Examples -------- >>> from sklearn.feature_extraction.text import TfidfVectorizer >>> corpus = [ ... 'This is the first document.', ... 'This document is the second document.', ... 'And this is the third one.', ... 'Is this the first document?', ... 
] >>> vectorizer = TfidfVectorizer() >>> X = vectorizer.fit_transform(corpus) >>> print(vectorizer.get_feature_names()) ['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this'] >>> print(X.shape) (4, 9) """ def __init__(self, *, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.float64, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): super().__init__( input=input, encoding=encoding, decode_error=decode_error, strip_accents=strip_accents, lowercase=lowercase, preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer, stop_words=stop_words, token_pattern=token_pattern, ngram_range=ngram_range, max_df=max_df, min_df=min_df, max_features=max_features, vocabulary=vocabulary, binary=binary, dtype=dtype) self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) # Broadcast the TF-IDF parameters to the underlying transformer instance # for easy grid search and repr @property def norm(self): return self._tfidf.norm @norm.setter def norm(self, value): self._tfidf.norm = value @property def use_idf(self): return self._tfidf.use_idf @use_idf.setter def use_idf(self, value): self._tfidf.use_idf = value @property def smooth_idf(self): return self._tfidf.smooth_idf @smooth_idf.setter def smooth_idf(self, value): self._tfidf.smooth_idf = value @property def sublinear_tf(self): return self._tfidf.sublinear_tf @sublinear_tf.setter def sublinear_tf(self, value): self._tfidf.sublinear_tf = value @property def idf_(self): return self._tfidf.idf_ @idf_.setter def idf_(self, value): self._validate_vocabulary() if hasattr(self, 'vocabulary_'): if len(self.vocabulary_) != len(value): raise ValueError("idf length = %d must be equal " "to vocabulary size = %d" % (len(value), len(self.vocabulary))) self._tfidf.idf_ = value def _check_params(self): if self.dtype not in FLOAT_DTYPES: warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will " "be converted to np.float64." .format(FLOAT_DTYPES, self.dtype), UserWarning) def fit(self, raw_documents, y=None): """Learn vocabulary and idf from training set. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. y : None This parameter is not needed to compute tfidf. Returns ------- self : object Fitted vectorizer. """ self._check_params() self._warn_for_unused_params() X = super().fit_transform(raw_documents) self._tfidf.fit(X) return self def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return document-term matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. y : None This parameter is ignored. Returns ------- X : sparse matrix of (n_samples, n_features) Tf-idf-weighted document-term matrix. """ self._check_params() X = super().fit_transform(raw_documents) self._tfidf.fit(X) # X is already a transformed view of raw_documents so # we set copy to False return self._tfidf.transform(X, copy=False) def transform(self, raw_documents): """Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). 
Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix of (n_samples, n_features) Tf-idf-weighted document-term matrix. """ check_is_fitted(self, msg='The TF-IDF vectorizer is not fitted') X = super().transform(raw_documents) return self._tfidf.transform(X, copy=False) def _more_tags(self): return {'X_types': ['string'], '_skip_test': True}
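# --- Illustrative usage sketch (editor's addition; not part of scikit-learn) ------
# The docstrings above state that, with smooth_idf=True, the learned weights are
#     idf(t) = ln((1 + n) / (1 + df(t))) + 1.
# The guarded snippet below double-checks that formula against the fitted ``idf_``
# of the TfidfVectorizer defined in this module, using a small made-up corpus. It
# only relies on names already available here (np, CountVectorizer, TfidfVectorizer).
if __name__ == "__main__":
    _corpus = [
        "this is the first document",
        "this document is the second document",
        "and this is the third one",
    ]
    _tfidf = TfidfVectorizer(use_idf=True, smooth_idf=True).fit(_corpus)

    # Raw document frequencies, with columns ordered like the fitted vocabulary.
    _counts = CountVectorizer(vocabulary=_tfidf.vocabulary_).fit_transform(_corpus)
    _df = np.bincount(_counts.nonzero()[1], minlength=_counts.shape[1])

    _manual_idf = np.log((1 + len(_corpus)) / (1 + _df)) + 1
    assert np.allclose(_manual_idf, _tfidf.idf_)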
bsd-3-clause
227,654,248,893,633,020
36.609367
79
0.595432
false
jameslyons/pycipher
tests/test_simple.py
1
1950
from pycipher.simplesubstitution import SimpleSubstitution import unittest class TestSimple(unittest.TestCase): def test_encipher(self): keys = ('abronjdfuetchiszlgwqvxkymp', 'mufykewgqtnrlopcbadsvxzijh', 'rtuzesbxjaniypqclghmvwodkf', 'ymdsvtxizewurqfnbgjlckoahp', 'lcvmwezojbgdtsrniufyqphxak') plaintext = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' ciphertext = ('abronjdfuetchiszlgwqvxkympabronjdfuetchiszlgwqvxkymp', 'mufykewgqtnrlopcbadsvxzijhmufykewgqtnrlopcbadsvxzijh', 'rtuzesbxjaniypqclghmvwodkfrtuzesbxjaniypqclghmvwodkf', 'ymdsvtxizewurqfnbgjlckoahpymdsvtxizewurqfnbgjlckoahp', 'lcvmwezojbgdtsrniufyqphxaklcvmwezojbgdtsrniufyqphxak') for i,key in enumerate(keys): enc = SimpleSubstitution(key).encipher(plaintext) self.assertEqual(enc.upper(), ciphertext[i].upper()) def test_decipher(self): keys = ('zidvgmlefwpsktrnaoqjyubhxc', 'wyqtnaopsxigdbzlhumvckrejf', 'bqcjpkfeuzlnxmgdastwhriyvo', 'enwdirhmykbfzsaojulpcqtvxg', 'rtzeawivnubkyjfchsldomqxpg') ciphertext = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' plaintext = ('qwzchiexbtmgfprksolnvdjyuaqwzchiexbtmgfprksolnvdjyua', 'fnumxzlqkyvpseghcwidrtajbofnumxzlqkyvpseghcwidrtajbo', 'qacphgouwdfknlzebvrsiytmxjqacphgouwdfknlzebvrsiytmxj', 'okudalzgeqjshbptvfnwrxcyimokudalzgeqjshbptvfnwrxcyim', 'ekptdozqgnlsviuywarbjhfxmcekptdozqgnlsviuywarbjhfxmc') for i,key in enumerate(keys): dec = SimpleSubstitution(key).decipher(ciphertext) self.assertEqual(dec.upper(), plaintext[i].upper()) if __name__ == '__main__': unittest.main()
mit
7,769,010,517,048,307,000
49
77
0.663077
false
graik/biskit
archive_biskit2/Biskit/Dock/settings.py
1
3174
## ## Biskit, a toolkit for the manipulation of macromolecular structures ## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You find a copy of the GNU General Public License in the file ## license.txt along with this program; if not, write to the Free ## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ## ## """ Settings ======== This module provides Dock-global settings as fields. Throughout Biskit.Dock a (environment-dependent) parameter such as, e.g., ssh_bin can be addressed as: >>> import Biskit.Dock.settings as S >>> bin = S.ssh_bin However, since a user should not be required to hack python modules, ssh_bin is not actually defined in settings.py. Instead, the value is taken from C{~/.biskit/settings_Dock.cfg} -- which should have an entry like C{ssh_bin=/bin/ssh # comment}. If this entry (or the config file) is not found, settings.py uses the default value from C{biskit/Biskit/data/defaults/settings_Dock.cfg}. If missing, the user configuration file C{~/.biskit/settings_Dock.cfg} is created automatically during the startup of Biskit (i.e. for any import). The auto-generated file only contains parameters for which the default values don't seem to work (invalid paths or binaries). See L{Biskit.SettingsManager} Summary for Biskit users ------------------------ If you want to change a biskit parameter, do so in C{~/.biskit/settings_Dock.cfg} Summary for Biskit developpers ------------------------------ If you want to create a new user-adjustable parameter, do so in C{biskit/Biskit/data/defaults/settings_Dock.cfg}. Summary for all --------------- !Dont't touch C{settings.py}! """ import Biskit as B import Biskit.tools as T import Biskit.SettingsManager as M import user, sys __CFG_DEFAULT = T.dataRoot() + '/defaults/settings_Dock.cfg' __CFG_USER = user.home + '/.biskit/settings_Dock.cfg' try: m = M.SettingsManager(__CFG_DEFAULT, __CFG_USER, createmissing=True ) m.updateNamespace( locals() ) except Exception, why: B.EHandler.fatal( 'Error importing Biskit.Dock settings') ############################## ## Check environment variables env = {} hex_env = {'HEX_ROOT':'/home/Bis/johan/APPLICATIONS/HEX', 'HEX_CACHE':'/home/Bis/johan/APPLICATIONS/HEX/hex_cache', 'HEX_VERSION':'4b'} prosaII_env = {'PROSA_BASE':'/home/Bis/shared/rh73/prosa/prosabase/'} env.update(hex_env) env.update(prosaII_env) ###################### ## clean up name space del B, T, M, user, sys del __CFG_DEFAULT, __CFG_USER, m ################ ## empty test ## import Biskit.test as BT class Test(BT.BiskitTest): """Mock test, settings is always executed anyway.""" pass
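## ---------------------------------------------------------------------------
## Illustrative sketch (editor's addition, not part of Biskit): the docstring
## above describes the lookup order -- values come from the packaged defaults
## file and are overridden by ~/.biskit/settings_Dock.cfg before being pushed
## into this module's namespace via SettingsManager.updateNamespace(). The
## helper below restates that pattern with the standard library ConfigParser;
## the file names and option handling are placeholders, not Biskit's real API.
def _load_settings_sketch(default_cfg, user_cfg):
    """Read the defaults file, overlay user values, return a plain dict."""
    try:
        from configparser import ConfigParser                       ## Python 3
    except ImportError:
        from ConfigParser import SafeConfigParser as ConfigParser   ## Python 2
    parser = ConfigParser()
    parser.read([default_cfg, user_cfg])   ## later files override earlier ones
    settings = {}
    for section in parser.sections():
        for option in parser.options(section):
            settings[option] = parser.get(section, option)
    return settings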
gpl-3.0
-4,584,866,479,027,789,300
29.815534
74
0.694707
false
BioSeq/Genetics-of-Race-Analysis
gORAnalysis.py
1
9988
#!/usr/bin/env python # # gORAnalysis.py # Author: Philip Braunstein # Copyright (c) 2015 BioSeq # # This is the analysis for the BioSeq Genetics of Race Experiment. It uses VCF # files generated from MiSeq to create a FASTA file of what each sample would # have looked like. This output file can be run easily analyzed with CLUSTAL # Omega to determine similarity between samples and phylogenetic tree. # import Tkinter as tk from sys import argv from sys import exit import os import uuid ############# CONSTANTS ################# # Files REF1 = "HVRI.fasta" REF2 = "HVRII.fasta" CONFIG = "config2.txt" OUTPUT = "gorOutput-" + str(uuid.uuid4()) + ".fasta" # Indexes POS_IDX = 1 REF_IDX = 3 ALT_IDX = 4 FILTER_IDX = 6 # Strings PASS = "PASS" # Numerical Constants HVRI_OFFSET = 15951 PAD_X = 5 PAD_Y = 5 ############################################ ########### CUSTOM CLASES ################## # UI class UIApp(tk.Frame): def __init__(self, parent): tk.Frame.__init__(self, parent) self.parent = parent self.initUI() def initUI(self): self.parent.title("BioSeq Analysis") self.grid() # Make Labels desc = tk.Label(self, text="Genetics of Race Analysis Program") instr = tk.Label(self, text="File Location") # Make entry ent = tk.Entry(self) # Make buttons def callbackQuit(): self.parent.destroy() quitButton = tk.Button(self, text="Quit", command=callbackQuit) def callbackOk(): path, fileName = analyze(ent.get().strip()) self.reportWindow = tk.Toplevel(self.parent) self.app = reportWindow(self.reportWindow, self.parent, path, fileName) okButton.config(state='disabled') okButton = tk.Button(self, text="Analyze", command=callbackOk) # Grid them desc.grid(row=0, columnspan=2) instr.grid(row=1, column=0) ent.grid(row=1, column=1) okButton.grid(row=2, column=0) quitButton.grid(row=2, column=1, sticky=tk.E) class reportWindow(tk.Frame): def __init__(self, parent, grandparent, path, fileName): self.parent = parent self.grandparent = grandparent self.path = path self.fileName = fileName def callbackQuit(): self.parent.destroy() self.grandparent.destroy() # destroy original window too tk.Frame.__init__(self, parent) # parent constructor desc = tk.Label(self, text="Genetics of Race Analysis Program") desc2 = tk.Label(self, text="Results written to " + self.fileName) desc3 = tk.Label(self, text="The results file is located here: " +\ self.path) quitButton = tk.Button(self, text="Quit", command=callbackQuit) self.grid() desc.grid() desc2.grid() desc3.grid() quitButton.grid(sticky = tk.E) # Custom exceptions class UserException(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return str(self.msg) class InternalException(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) ############################################ def main(): root = tk.Tk() app = UIApp(root) root.mainloop() exit(0) def analyze(path): ref1 = readInRef(REF1) ref2 = readInRef(REF2) pairs = readInConfig(path) # Generate the FASTA files fastaDict = makeFASTAs(path, pairs, ref1, ref2) writeOut(fastaDict, pairs, path) return (path, OUTPUT) # return path and output file name # Is given a file name and reads in the nucleotide seq as a string # returns this string. def readInRef(fileName): toReturn = '' with open(fileName, 'r') as filer: for line in filer: if line.startswith(">"): continue else: toReturn += line.strip() return toReturn # Reads in the config file into a dictionary so that # the first entry is the key and the second entry is # the value on each line. 
Returns a dictionary of this form def readInConfig(path): dictio = {} with open(os.path.join(path, CONFIG), 'r') as filer: for line in filer: listl = line.split(",") listl = [x.strip() for x in listl] dictio[listl[0]] = listl[1] return dictio # path is the path to the directory containing to VCF files # Returns a dictionary of {sampleId: newRef} where the newRef is # the reference with the changes incorporated from the VCF file and # the sampleId is the Id listed in the config file def makeFASTAs(path, pairs, ref1, ref2): toReturn = {} # Get VCF files vcfFiles = [x for x in os.listdir(path) if x.endswith(".vcf")] # Get lists of ref1 and ref2 to be used firsts = pairs.keys() seconds = pairs.values() for vcf in vcfFiles: nub = vcf.split(".")[0] # Figure out which region the VCF file comes from if nub in firsts: newRef = makeChanges(ref1, vcf, path, True) elif nub in seconds: newRef = makeChanges(ref2, vcf, path, False) else: continue # vcf file not in config file, skip it #print vcf toReturn[nub] = newRef #print newRef #print return toReturn # If there are multiple alleles for the same variant, it makes the greedy # choice and choses the first one by default # When the variant file is from HVRI, need to throw out records below a # threshhold # SNPs prioritized over INDELs def makeChanges(seq, vcf, path, isHVRI): indelStack = [] validSnpPoses = [] nucList = [x for x in seq] with open(os.path.join(path, vcf), 'r') as filer: for line in filer: if line.startswith("#"): # Skip comment lines continue listL = line.split("\t") # make everything upper case so no case clashes listL = [x.upper() for x in listL] # position should be an int not a string listL[POS_IDX] = int(listL[POS_IDX]) if listL[FILTER_IDX] != PASS: # Skip vars that don't pass filter continue pos = listL[POS_IDX] refNuc = oneInd(nucList, pos) vcfOldNuc = listL[REF_IDX] vcfNewNuc = listL[ALT_IDX] # More than one allele, just grab first one if "," in vcfNewNuc: vcfNewNuc = vcfNewNuc.split(",")[0] listL[ALT_IDX] = vcfNewNuc # These two lines protect against variants from other regions # that end up in these files for some reason if isHVRI: # Adjust for offset within file pos = pos - HVRI_OFFSET refNuc = oneInd(nucList, pos) # Recalc refNuc listL[POS_IDX] = pos if pos < 1: continue if refNuc is None: continue ######## # Save Indels for later if len(vcfOldNuc) != 1 or len(vcfNewNuc) != 1: indelStack.append(listL) continue # VCF doesn't match reference, something wrong if vcfOldNuc != refNuc: raise UserException("VCF REF entry doesn't match" +\ " NCBI reference sequence. \nVCF: " + listL[REF_IDX] +\ "\nREF: " + oneInd(nucList, pos) + "\nVCF_POS: " +\ str(pos) + "\nSee VCF_File: " + vcf) # Replace SNP nucList[pos - 1] = vcfNewNuc validSnpPoses.append(pos) return addIndels(nucList, indelStack, validSnpPoses, vcf) # Returns the entry in the list at position pos - 1. It is used # to adjust between 1-indexed systems and 0-indexed systems def oneInd(ls, pos): try: return ls[pos - 1] except IndexError: return None # Inputs a list of characters that represenets a nucleotide sequence # a list of Indel intries and a list of positions not to add Indels in # as they were already included as SNPs. 
# Assumes position in each indel list is already an int (not a string) # Assumes all multiple alleles have been replaced by a single one (in # alternate) #TODO: TEST THIS MORE AND DOUBLE CHECK LOGIC def addIndels(nucList, indels, exceptions, fileName): isDel = False # sort indels by position (should already be done, but to be safe) indels.sort(key=lambda x: x[POS_IDX]) indels = [x for x in indels if x[POS_IDX] not in exceptions] for variant in indels: # insertion if len(variant[REF_IDX]) <= len(variant[ALT_IDX]): nucList[variant[POS_IDX] - 1] = variant[ALT_IDX] # print "INSERTION" else: # deletion # no. nucleotides deleted # print "DELETION", fileName isDel = True idx = variant[POS_IDX] - 1 diff = len(variant[REF_IDX]) - len(variant[ALT_IDX]) for i in range(idx + 1, idx + diff + 1): nucList[i] = '-' # Dashes mark a deletion nucString = "".join(nucList) nucString = nucString.replace('-', '') return nucString def writeOut(fastaDict, pairs, path): filew = open(os.path.join(path, OUTPUT), 'w') firsts = pairs.keys() for key in firsts: try: keySeq = fastaDict[key] except KeyError: continue try: valSeq = fastaDict[pairs[key]] except KeyError: continue # If you get here, then there are sequences for both key and val filew.write(">" + key + "_" + pairs[key] + "\n") filew.write(keySeq + valSeq + "\n") filew.close() if __name__ == '__main__': main()
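# --- Illustrative sketch (editor's addition, not part of the BioSeq pipeline) ---
# The comments above describe the core SNP step of makeChanges(): VCF positions are
# 1-based, the REF base is checked against the reference sequence, and the ALT base
# is written in its place. The small standalone function below restates just that
# step on plain strings; the variant tuples are made-up examples, not MiSeq output.
def applySnpsSketch(reference, snps):
    """reference: nucleotide string; snps: list of (pos_1based, refNuc, altNuc)."""
    nucList = [x for x in reference]
    for pos, refNuc, altNuc in snps:
        if nucList[pos - 1] != refNuc:   # same consistency check as makeChanges()
            raise ValueError("REF mismatch at position %d" % pos)
        nucList[pos - 1] = altNuc
    return "".join(nucList)

# Example: applySnpsSketch("ACGTACGT", [(3, 'G', 'A')]) returns "ACATACGT".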
mit
3,348,724,493,650,054,000
28.376471
79
0.575891
false
uwosh/uwosh.fieldworkflow
uwosh/fieldworkflow/tests.py
1
1490
import unittest from zope.testing import doctestunit from zope.component import testing from Testing import ZopeTestCase as ztc from Products.Five import zcml from Products.Five import fiveconfigure from Products.PloneTestCase import PloneTestCase as ptc from Products.PloneTestCase.layer import PloneSite ptc.setupPloneSite() import uwosh.fieldworkflow class TestCase(ptc.PloneTestCase): class layer(PloneSite): @classmethod def setUp(cls): fiveconfigure.debug_mode = True zcml.load_config('configure.zcml', uwosh.fieldworkflow) fiveconfigure.debug_mode = False @classmethod def tearDown(cls): pass def test_suite(): return unittest.TestSuite([ # Unit tests #doctestunit.DocFileSuite( # 'README.txt', package='uwosh.fieldworkflow', # setUp=testing.setUp, tearDown=testing.tearDown), #doctestunit.DocTestSuite( # module='uwosh.fieldworkflow.mymodule', # setUp=testing.setUp, tearDown=testing.tearDown), # Integration tests that use PloneTestCase #ztc.ZopeDocFileSuite( # 'README.txt', package='uwosh.fieldworkflow', # test_class=TestCase), #ztc.FunctionalDocFileSuite( # 'browser.txt', package='uwosh.fieldworkflow', # test_class=TestCase), ]) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
gpl-2.0
-3,443,589,749,189,728,000
26.592593
61
0.64698
false
hologram-io/hologram-python
Hologram/Authentication/CSRPSKAuthentication.py
1
2419
# CSRPSKAuthentication.py - Hologram Python SDK CSRPSKAuthentication interface # # Author: Hologram <[email protected]> # # Copyright 2016 - Hologram (Konekt, Inc.) # # This CSRPSKAuthentication file implements the CSRPSK authentication interface. # # LICENSE: Distributed under the terms of the MIT License # import json from Exceptions.HologramError import AuthenticationError from Hologram.Authentication.HologramAuthentication import HologramAuthentication DEVICE_KEY_LEN = 8 class CSRPSKAuthentication(HologramAuthentication): def __init__(self, credentials): self._data = {} super().__init__(credentials=credentials) def buildPayloadString(self, messages, topics=None, modem_type=None, modem_id=None, version=None): self.enforceValidDeviceKey() super().buildPayloadString(messages, topics=topics, modem_type=modem_type, modem_id=modem_id, version=version) payload = json.dumps(self._data) + "\r\r" return payload.encode() def buildSMSPayloadString(self, destination_number, message): self.enforceValidDeviceKey() send_data = 'S' + self.credentials['devicekey'] send_data += destination_number + ' ' + message send_data += "\r\r" return send_data.encode() def buildAuthString(self, timestamp=None, sequence_number=None): self._data['k'] = self.credentials['devicekey'] def buildMetadataString(self, modem_type, modem_id, version): formatted_string = f"{self.build_modem_type_id_str(modem_type, modem_id)}-{version}" self._data['m'] = self.metadata_version.decode() + formatted_string def buildTopicString(self, topics): self._data['t'] = topics def buildMessageString(self, messages): self._data['d'] = messages def enforceValidDeviceKey(self): if not isinstance(self.credentials, dict): raise AuthenticationError('Credentials is not a dictionary') elif not self.credentials['devicekey']: raise AuthenticationError('Must set devicekey to use CSRPSKAuthentication') elif len(self.credentials['devicekey']) != DEVICE_KEY_LEN: raise AuthenticationError('Device key must be %d characters long' % DEVICE_KEY_LEN)
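# --- Illustrative sketch (editor's addition, not part of the Hologram SDK) -------
# The methods above assemble the CSRPSK payload piecewise: 'k' carries the
# 8-character device key, 't' the topics and 'd' the messages, and the JSON blob is
# terminated with "\r\r" before being encoded. The guarded fragment below restates
# that wire format with a made-up device key so the expected output is easy to
# inspect; it is a sketch of the payload shape only, not the SDK's public API.
if __name__ == '__main__':
    example_credentials = {'devicekey': 'ABCD1234'}    # hypothetical 8-character key
    example_payload = {
        'k': example_credentials['devicekey'],
        't': ['TOPIC1'],
        'd': ['hello, world'],
    }
    print((json.dumps(example_payload) + "\r\r").encode())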
mit
5,625,506,043,620,905,000
35.104478
95
0.649442
false
dspmeng/code
scripts/GopBitRate.py
1
1764
import csv
import itertools
import numpy as np
import getopt
from os import path
from os import system
from sys import argv, exit

try:
    opts, args = getopt.getopt(argv[1:], 'f:')
except getopt.GetoptError:
    print 'GopBitRate.py -f <frame rate> <stats csv>'
    exit(2)  # opts/args are undefined if option parsing failed

print opts
print args

frameRate = 30
for opt, arg in opts:
    if opt == '-f':
        frameRate = int(arg)
    else:
        print 'unknown opt: ', opt
print 'frame rate: %d' % frameRate

# Dump per-frame stats with ffprobe, then parse the resulting CSV.
esFile = args[0]
stats = path.splitext(path.basename(esFile))[0] + '.csv'
system('ffprobe -of csv -show_frames ' + esFile + '>' + stats)

frameType = ''
count = 0
totalFrames = 0
totalSize = 0
gopFrames = []
gopSize = []
size = 0

with open(stats, 'r') as f:
    reader = csv.reader(f)
    for row in itertools.islice(reader, 0, None):
        try:
            count = int(row[-5])
            frameType = row[-6]
            totalFrames += 1
            size = int(row[13])
            totalSize += size
            if frameType == 'I':  # an I-frame starts a new GOP
                gopFrames.append(0)
                gopSize.append(0)
            gopFrames[-1] += 1
            gopSize[-1] += size
        except Exception, e:
            print str(e) + ': %s' % row

totalSize *= 8
print 'Total size (%d frames): %d bits' % (totalFrames, totalSize)
print 'Average bitrate: %f Mbps' % (float(totalSize) * frameRate / totalFrames / 1000000.0)
# Normalise each GOP to Mbps; float() avoids integer division under Python 2.
normGopSize = map(lambda x, y: float(x) / y * frameRate * 8 / 1000000.0, gopSize, gopFrames)
maxGopSize = max(normGopSize)
maxGop = normGopSize.index(maxGopSize)
print 'Maximum bitrate(Gop#%d): %f Mbps' % (maxGop, normGopSize[maxGop])
for i in np.argsort(normGopSize)[::-1][:10]:
    print 'GOP#%3d(%4d): %8d bytes, %6d frames -> %f Mbps' % (i, sum(gopFrames[0:i+1]) - gopFrames[i], gopSize[i], gopFrames[i], normGopSize[i])
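# --- Illustrative sketch (editor's addition) --------------------------------------
# The per-GOP figures printed above are bytes-per-GOP converted to megabits/second:
#     gop_bitrate_mbps = gop_bytes / gop_frames * frame_rate * 8 / 1e6
# The numbers below are synthetic, chosen only to make the unit conversion easy to
# check by hand; they are not taken from any real stream.
exampleGopBytes = 750000.0    # ~750 kB of coded data in one GOP (made-up value)
exampleGopFrames = 30         # a one-second GOP at 30 fps (made-up value)
exampleFrameRate = 30
exampleGopMbps = exampleGopBytes / exampleGopFrames * exampleFrameRate * 8 / 1000000.0
# -> 6.0 Mbps for this synthetic GOP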
apache-2.0
3,685,222,563,568,351,000
27
144
0.607143
false
Nekroze/librarian
docs/conf.py
1
8158
# -*- coding: utf-8 -*- # # complexity documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) cwd = os.getcwd() parent = os.path.dirname(cwd) sys.path.insert(0, parent) import librarian # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'librarian' copyright = u'2013, Taylor "Nekroze" Lawson' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = librarian.__version__ # The full version, including alpha/beta/rc tags. release = librarian.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'librariandoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'librarian.tex', u'librarian Documentation', u'Taylor "Nekroze" Lawson', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'librarian', u'librarian Documentation', [u'Taylor "Nekroze" Lawson'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'librarian', u'librarian Documentation', u'Taylor "Nekroze" Lawson', 'librarian', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
mit
-157,021,622,559,134,340
31.122047
80
0.706546
false
iandees/all-the-places
locations/spiders/hihostels.py
1
1934
# -*- coding: utf-8 -*-
import scrapy
import re

from locations.items import GeojsonPointItem


class HiHostelsSpider(scrapy.Spider):
    name = "hihostels"
    allowed_domains = ['hihostels.com']
    start_urls = (
        'https://www.hihostels.com/sitemap.xml',
    )

    def parse(self, response):
        response.selector.remove_namespaces()
        city_urls = response.xpath('//url/loc/text()').extract()
        # Only hostel detail pages are of interest; note the escaped dot in the domain.
        regex = re.compile(r'http\S+hihostels\.com/\S+/hostels/\S+')
        for path in city_urls:
            if re.search(regex, path):
                yield scrapy.Request(
                    path.strip(),
                    callback=self.parse_store,
                )

    def parse_store(self, response):
        properties = {
            'name': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/h1/span/text()').extract()[0].split()),
            'ref': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/h1/span/text()').extract()[0].split()),
            'addr_full': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[0].split()),
            'city': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[1].split()),
            'postcode': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[-2].split()),
            'country': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[-1].split()),
            'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
            'lon': float(response.xpath('//*[@id ="lon"]/@value').extract()[0]),
            'lat': float(response.xpath('//*[@id ="lat"]/@value').extract()[0]),
        }

        yield GeojsonPointItem(**properties)
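# --- Illustrative sketch (editor's addition, not part of the spider) --------------
# parse() keeps only sitemap <loc> URLs that look like hostel detail pages. The
# helper below applies the same regular expression to a couple of made-up URLs so
# the filtering intent is visible outside of a full Scrapy crawl.
def _filter_hostel_urls_demo():
    hostel_page_re = re.compile(r'http\S+hihostels\.com/\S+/hostels/\S+')
    example_locs = [
        'https://www.hihostels.com/de/hostels/example-hostel',   # kept (made-up URL)
        'https://www.hihostels.com/de/about-us',                 # dropped (made-up URL)
    ]
    return [url for url in example_locs if hostel_page_re.search(url)]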
mit
2,543,737,011,314,564,600
45.047619
146
0.542399
false
senttech/Cura
cura/Settings/MachineManager.py
1
50213
# Copyright (c) 2016 Ultimaker B.V. # Cura is released under the terms of the AGPLv3 or higher. from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal from PyQt5.QtWidgets import QMessageBox from UM.Application import Application from UM.Preferences import Preferences from UM.Logger import Logger from UM.Message import Message from UM.Settings.SettingRelation import RelationType import UM.Settings from cura.PrinterOutputDevice import PrinterOutputDevice from . import ExtruderManager from UM.i18n import i18nCatalog catalog = i18nCatalog("cura") import time import os class MachineManager(QObject): def __init__(self, parent = None): super().__init__(parent) self._active_container_stack = None self._global_container_stack = None Application.getInstance().globalContainerStackChanged.connect(self._onGlobalContainerChanged) ## When the global container is changed, active material probably needs to be updated. self.globalContainerChanged.connect(self.activeMaterialChanged) self.globalContainerChanged.connect(self.activeVariantChanged) self.globalContainerChanged.connect(self.activeQualityChanged) self._active_stack_valid = None self._onGlobalContainerChanged() ExtruderManager.getInstance().activeExtruderChanged.connect(self._onActiveExtruderStackChanged) self._onActiveExtruderStackChanged() ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeMaterialChanged) ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeVariantChanged) ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeQualityChanged) self.globalContainerChanged.connect(self.activeStackChanged) self.globalValueChanged.connect(self.activeStackChanged) ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeStackChanged) self._empty_variant_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_variant")[0] self._empty_material_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_material")[0] self._empty_quality_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_quality")[0] self._empty_quality_changes_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_quality_changes")[0] Preferences.getInstance().addPreference("cura/active_machine", "") self._global_event_keys = set() active_machine_id = Preferences.getInstance().getValue("cura/active_machine") self._printer_output_devices = [] Application.getInstance().getOutputDeviceManager().outputDevicesChanged.connect(self._onOutputDevicesChanged) if active_machine_id != "": # An active machine was saved, so restore it. self.setActiveMachine(active_machine_id) if self._global_container_stack and self._global_container_stack.getProperty("machine_extruder_count", "value") > 1: # Make sure _active_container_stack is properly initiated ExtruderManager.getInstance().setActiveExtruderIndex(0) self._auto_materials_changed = {} self._auto_hotends_changed = {} globalContainerChanged = pyqtSignal() activeMaterialChanged = pyqtSignal() activeVariantChanged = pyqtSignal() activeQualityChanged = pyqtSignal() activeStackChanged = pyqtSignal() globalValueChanged = pyqtSignal() # Emitted whenever a value inside global container is changed. 
activeValidationChanged = pyqtSignal() # Emitted whenever a validation inside active container is changed blurSettings = pyqtSignal() # Emitted to force fields in the advanced sidebar to un-focus, so they update properly outputDevicesChanged = pyqtSignal() def _onOutputDevicesChanged(self): for printer_output_device in self._printer_output_devices: printer_output_device.hotendIdChanged.disconnect(self._onHotendIdChanged) printer_output_device.materialIdChanged.disconnect(self._onMaterialIdChanged) self._printer_output_devices.clear() for printer_output_device in Application.getInstance().getOutputDeviceManager().getOutputDevices(): if isinstance(printer_output_device, PrinterOutputDevice): self._printer_output_devices.append(printer_output_device) printer_output_device.hotendIdChanged.connect(self._onHotendIdChanged) printer_output_device.materialIdChanged.connect(self._onMaterialIdChanged) self.outputDevicesChanged.emit() @pyqtProperty("QVariantList", notify = outputDevicesChanged) def printerOutputDevices(self): return self._printer_output_devices def _onHotendIdChanged(self, index, hotend_id): if not self._global_container_stack: return containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type="variant", definition=self._global_container_stack.getBottom().getId(), name=hotend_id) if containers: # New material ID is known extruder_manager = ExtruderManager.getInstance() extruders = list(extruder_manager.getMachineExtruders(self.activeMachineId)) matching_extruder = None for extruder in extruders: if str(index) == extruder.getMetaDataEntry("position"): matching_extruder = extruder break if matching_extruder and matching_extruder.findContainer({"type": "variant"}).getName() != hotend_id: # Save the material that needs to be changed. Multiple changes will be handled by the callback. 
self._auto_hotends_changed[str(index)] = containers[0].getId() self._printer_output_devices[0].materialHotendChangedMessage(self._materialHotendChangedCallback) else: Logger.log("w", "No variant found for printer definition %s with id %s" % (self._global_container_stack.getBottom().getId(), hotend_id)) def _autoUpdateHotends(self): extruder_manager = ExtruderManager.getInstance() for position in self._auto_hotends_changed: hotend_id = self._auto_hotends_changed[position] old_index = extruder_manager.activeExtruderIndex if old_index != int(position): extruder_manager.setActiveExtruderIndex(int(position)) else: old_index = None Logger.log("d", "Setting hotend variant of hotend %s to %s" % (position, hotend_id)) self.setActiveVariant(hotend_id) if old_index is not None: extruder_manager.setActiveExtruderIndex(old_index) def _onMaterialIdChanged(self, index, material_id): if not self._global_container_stack: return definition_id = "fdmprinter" if self._global_container_stack.getMetaDataEntry("has_machine_materials", False): definition_id = self._global_container_stack.getBottom().getId() extruder_manager = ExtruderManager.getInstance() containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "material", definition = definition_id, GUID = material_id) if containers: # New material ID is known extruders = list(extruder_manager.getMachineExtruders(self.activeMachineId)) matching_extruder = None for extruder in extruders: if str(index) == extruder.getMetaDataEntry("position"): matching_extruder = extruder break if matching_extruder and matching_extruder.findContainer({"type":"material"}).getMetaDataEntry("GUID") != material_id: # Save the material that needs to be changed. Multiple changes will be handled by the callback. self._auto_materials_changed[str(index)] = containers[0].getId() self._printer_output_devices[0].materialHotendChangedMessage(self._materialHotendChangedCallback) else: Logger.log("w", "No material definition found for printer definition %s and GUID %s" % (definition_id, material_id)) def _materialHotendChangedCallback(self, button): if button == QMessageBox.No: self._auto_materials_changed = {} self._auto_hotends_changed = {} return self._autoUpdateMaterials() self._autoUpdateHotends() def _autoUpdateMaterials(self): extruder_manager = ExtruderManager.getInstance() for position in self._auto_materials_changed: material_id = self._auto_materials_changed[position] old_index = extruder_manager.activeExtruderIndex if old_index != int(position): extruder_manager.setActiveExtruderIndex(int(position)) else: old_index = None Logger.log("d", "Setting material of hotend %s to %s" % (position, material_id)) self.setActiveMaterial(material_id) if old_index is not None: extruder_manager.setActiveExtruderIndex(old_index) def _onGlobalContainerChanged(self): if self._global_container_stack: self._global_container_stack.nameChanged.disconnect(self._onMachineNameChanged) self._global_container_stack.containersChanged.disconnect(self._onInstanceContainersChanged) self._global_container_stack.propertyChanged.disconnect(self._onPropertyChanged) material = self._global_container_stack.findContainer({"type": "material"}) material.nameChanged.disconnect(self._onMaterialNameChanged) quality = self._global_container_stack.findContainer({"type": "quality"}) quality.nameChanged.disconnect(self._onQualityNameChanged) self._global_container_stack = Application.getInstance().getGlobalContainerStack() self._active_container_stack = self._global_container_stack 
self.globalContainerChanged.emit() if self._global_container_stack: Preferences.getInstance().setValue("cura/active_machine", self._global_container_stack.getId()) self._global_container_stack.nameChanged.connect(self._onMachineNameChanged) self._global_container_stack.containersChanged.connect(self._onInstanceContainersChanged) self._global_container_stack.propertyChanged.connect(self._onPropertyChanged) material = self._global_container_stack.findContainer({"type": "material"}) material.nameChanged.connect(self._onMaterialNameChanged) quality = self._global_container_stack.findContainer({"type": "quality"}) quality.nameChanged.connect(self._onQualityNameChanged) def _onActiveExtruderStackChanged(self): self.blurSettings.emit() # Ensure no-one has focus. if self._active_container_stack and self._active_container_stack != self._global_container_stack: self._active_container_stack.containersChanged.disconnect(self._onInstanceContainersChanged) self._active_container_stack.propertyChanged.disconnect(self._onPropertyChanged) self._active_container_stack = ExtruderManager.getInstance().getActiveExtruderStack() if self._active_container_stack: self._active_container_stack.containersChanged.connect(self._onInstanceContainersChanged) self._active_container_stack.propertyChanged.connect(self._onPropertyChanged) else: self._active_container_stack = self._global_container_stack self._active_stack_valid = not self._checkStackForErrors(self._active_container_stack) self.activeValidationChanged.emit() def _onInstanceContainersChanged(self, container): container_type = container.getMetaDataEntry("type") if container_type == "material": self.activeMaterialChanged.emit() elif container_type == "variant": self.activeVariantChanged.emit() elif container_type == "quality": self.activeQualityChanged.emit() def _onPropertyChanged(self, key, property_name): if property_name == "value": # If a setting is not settable per extruder, but "has enabled relations" that are settable per extruder # we need to copy the value to global, so that the front-end displays the right settings. 
if not self._active_container_stack.getProperty(key, "settable_per_extruder"): relations = self._global_container_stack.getBottom()._getDefinition(key).relations for relation in filter(lambda r: r.role == "enabled" and r.type == RelationType.RequiredByTarget, relations): # Target setting is settable per extruder if self._active_container_stack.getProperty(relation.target.key, "settable_per_extruder"): new_value = self._global_container_stack.getProperty(key, "value") stacks = [stack for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId())] for extruder_stack in stacks: if extruder_stack.getProperty(key, "value") != new_value: extruder_stack.getTop().setProperty(key, "value", new_value) break if property_name == "validationState": if self._active_stack_valid: if self._active_container_stack.getProperty(key, "settable_per_extruder"): changed_validation_state = self._active_container_stack.getProperty(key, property_name) else: changed_validation_state = self._global_container_stack.getProperty(key, property_name) if changed_validation_state in (UM.Settings.ValidatorState.Exception, UM.Settings.ValidatorState.MaximumError, UM.Settings.ValidatorState.MinimumError): self._active_stack_valid = False self.activeValidationChanged.emit() else: if not self._checkStackForErrors(self._active_container_stack) and not self._checkStackForErrors(self._global_container_stack): self._active_stack_valid = True self.activeValidationChanged.emit() self.activeStackChanged.emit() @pyqtSlot(str) def setActiveMachine(self, stack_id): containers = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id = stack_id) if containers: Application.getInstance().setGlobalContainerStack(containers[0]) @pyqtSlot(str, str) def addMachine(self, name, definition_id): container_registry = UM.Settings.ContainerRegistry.getInstance() definitions = container_registry.findDefinitionContainers(id = definition_id) if definitions: definition = definitions[0] name = self._createUniqueName("machine", "", name, definition.getName()) new_global_stack = UM.Settings.ContainerStack(name) new_global_stack.addMetaDataEntry("type", "machine") container_registry.addContainer(new_global_stack) variant_instance_container = self._updateVariantContainer(definition) material_instance_container = self._updateMaterialContainer(definition, variant_instance_container) quality_instance_container = self._updateQualityContainer(definition, variant_instance_container, material_instance_container) current_settings_instance_container = UM.Settings.InstanceContainer(name + "_current_settings") current_settings_instance_container.addMetaDataEntry("machine", name) current_settings_instance_container.addMetaDataEntry("type", "user") current_settings_instance_container.setDefinition(definitions[0]) container_registry.addContainer(current_settings_instance_container) new_global_stack.addContainer(definition) if variant_instance_container: new_global_stack.addContainer(variant_instance_container) if material_instance_container: new_global_stack.addContainer(material_instance_container) if quality_instance_container: new_global_stack.addContainer(quality_instance_container) new_global_stack.addContainer(self._empty_quality_changes_container) new_global_stack.addContainer(current_settings_instance_container) ExtruderManager.getInstance().addMachineExtruders(definition, new_global_stack.getId()) Application.getInstance().setGlobalContainerStack(new_global_stack) ## Create a name that is not empty and unique # \param 
container_type \type{string} Type of the container (machine, quality, ...) # \param current_name \type{} Current name of the container, which may be an acceptable option # \param new_name \type{string} Base name, which may not be unique # \param fallback_name \type{string} Name to use when (stripped) new_name is empty # \return \type{string} Name that is unique for the specified type and name/id def _createUniqueName(self, container_type, current_name, new_name, fallback_name): return UM.Settings.ContainerRegistry.getInstance().createUniqueName(container_type, current_name, new_name, fallback_name) ## Convenience function to check if a stack has errors. def _checkStackForErrors(self, stack): if stack is None: return False for key in stack.getAllKeys(): validation_state = stack.getProperty(key, "validationState") if validation_state in (UM.Settings.ValidatorState.Exception, UM.Settings.ValidatorState.MaximumError, UM.Settings.ValidatorState.MinimumError): return True return False ## Remove all instances from the top instanceContainer (effectively removing all user-changed settings) @pyqtSlot() def clearUserSettings(self): if not self._active_container_stack: return self.blurSettings.emit() user_settings = self._active_container_stack.getTop() user_settings.clear() ## Check if the global_container has instances in the user container @pyqtProperty(bool, notify = activeStackChanged) def hasUserSettings(self): if not self._global_container_stack: return False if self._global_container_stack.getTop().findInstances(): return True for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId()): if stack.getTop().findInstances(): return True return False ## Delete a user setting from the global stack and all extruder stacks. 
# \param key \type{str} the name of the key to delete @pyqtSlot(str) def clearUserSettingAllCurrentStacks(self, key): if not self._global_container_stack: return self._global_container_stack.getTop().removeInstance(key) for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId()): stack.getTop().removeInstance(key) ## Check if the global profile does not contain error states # Note that the _active_stack_valid is cached due to performance issues # Calling _checkStackForErrors on every change is simply too expensive @pyqtProperty(bool, notify = activeValidationChanged) def isActiveStackValid(self): return bool(self._active_stack_valid) @pyqtProperty(str, notify = activeStackChanged) def activeUserProfileId(self): if self._active_container_stack: return self._active_container_stack.getTop().getId() return "" @pyqtProperty(str, notify = globalContainerChanged) def activeMachineName(self): if self._global_container_stack: return self._global_container_stack.getName() return "" @pyqtProperty(str, notify = globalContainerChanged) def activeMachineId(self): if self._global_container_stack: return self._global_container_stack.getId() return "" @pyqtProperty(str, notify = activeStackChanged) def activeStackId(self): if self._active_container_stack: return self._active_container_stack.getId() return "" @pyqtProperty(str, notify = activeMaterialChanged) def activeMaterialName(self): if self._active_container_stack: material = self._active_container_stack.findContainer({"type":"material"}) if material: return material.getName() return "" @pyqtProperty(str, notify=activeMaterialChanged) def activeMaterialId(self): if self._active_container_stack: material = self._active_container_stack.findContainer({"type": "material"}) if material: return material.getId() return "" @pyqtProperty("QVariantMap", notify = activeMaterialChanged) def allActiveMaterialIds(self): if not self._global_container_stack: return {} result = {} for stack in ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks(): material_container = stack.findContainer(type = "material") if not material_container: continue result[stack.getId()] = material_container.getId() return result ## Get the Material ID associated with the currently active material # \returns MaterialID (string) if found, empty string otherwise @pyqtProperty(str, notify=activeQualityChanged) def activeQualityMaterialId(self): if self._active_container_stack: quality = self._active_container_stack.findContainer({"type": "quality"}) if quality: material_id = quality.getMetaDataEntry("material") if material_id: # if the currently active machine inherits its qualities from a different machine # definition, make sure to return a material that is relevant to that machine definition definition_id = self.activeDefinitionId quality_definition_id = self.activeQualityDefinitionId if definition_id != quality_definition_id: material_id = material_id.replace(definition_id, quality_definition_id, 1) return material_id return "" @pyqtProperty(str, notify=activeQualityChanged) def activeQualityName(self): if self._active_container_stack: quality = self._active_container_stack.findContainer({"type": "quality_changes"}) if quality and quality != self._empty_quality_changes_container: return quality.getName() quality = self._active_container_stack.findContainer({"type": "quality"}) if quality: return quality.getName() return "" @pyqtProperty(str, notify=activeQualityChanged) def activeQualityId(self): if self._global_container_stack: quality = 
self._global_container_stack.findContainer({"type": "quality_changes"}) if quality and quality != self._empty_quality_changes_container: return quality.getId() quality = self._global_container_stack.findContainer({"type": "quality"}) if quality: return quality.getId() return "" @pyqtProperty(str, notify = activeQualityChanged) def activeQualityType(self): if self._global_container_stack: quality = self._global_container_stack.findContainer(type = "quality") if quality: return quality.getMetaDataEntry("quality_type") return "" @pyqtProperty(str, notify = activeQualityChanged) def activeQualityChangesId(self): if self._global_container_stack: changes = self._global_container_stack.findContainer(type = "quality_changes") if changes: return changes.getId() return "" ## Check if a container is read_only @pyqtSlot(str, result = bool) def isReadOnly(self, container_id): containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = container_id) if not containers or not self._active_container_stack: return True return containers[0].isReadOnly() ## Copy the value of the setting of the current extruder to all other extruders as well as the global container. @pyqtSlot(str) def copyValueToExtruders(self, key): if not self._active_container_stack or self._global_container_stack.getProperty("machine_extruder_count", "value") <= 1: return new_value = self._active_container_stack.getProperty(key, "value") stacks = [stack for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId())] stacks.append(self._global_container_stack) for extruder_stack in stacks: if extruder_stack != self._active_container_stack and extruder_stack.getProperty(key, "value") != new_value: extruder_stack.getTop().setProperty(key, "value", new_value) @pyqtSlot(str) def setActiveMaterial(self, material_id): containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = material_id) if not containers or not self._active_container_stack: return Logger.log("d", "Attempting to change the active material to %s", material_id) old_variant = self._active_container_stack.findContainer({"type": "variant"}) old_material = self._active_container_stack.findContainer({"type": "material"}) old_quality = self._active_container_stack.findContainer({"type": "quality"}) old_quality_changes = self._active_container_stack.findContainer({"type": "quality_changes"}) if not old_material: Logger.log("w", "While trying to set the active material, no material was found to replace it.") return if old_quality_changes.getId() == "empty_quality_changes": #Don't want the empty one. 
old_quality_changes = None self.blurSettings.emit() old_material.nameChanged.disconnect(self._onMaterialNameChanged) material_index = self._active_container_stack.getContainerIndex(old_material) self._active_container_stack.replaceContainer(material_index, containers[0]) containers[0].nameChanged.connect(self._onMaterialNameChanged) if containers[0].getMetaDataEntry("compatible") == False: message = Message(catalog.i18nc("@info:status", "The selected material is imcompatible with the selected machine or configuration.")) message.show() if old_quality: if old_quality_changes: new_quality = self._updateQualityChangesContainer(old_quality.getMetaDataEntry("quality_type"), old_quality_changes.getMetaDataEntry("name")) else: new_quality = self._updateQualityContainer(self._global_container_stack.getBottom(), old_variant, containers[0], old_quality.getName()) else: new_quality = self._updateQualityContainer(self._global_container_stack.getBottom(), old_variant, containers[0]) self.setActiveQuality(new_quality.getId()) @pyqtSlot(str) def setActiveVariant(self, variant_id): containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = variant_id) if not containers or not self._active_container_stack: return Logger.log("d", "Attempting to change the active variant to %s", variant_id) old_variant = self._active_container_stack.findContainer({"type": "variant"}) old_material = self._active_container_stack.findContainer({"type": "material"}) if old_variant: self.blurSettings.emit() variant_index = self._active_container_stack.getContainerIndex(old_variant) self._active_container_stack.replaceContainer(variant_index, containers[0]) preferred_material = None if old_material: preferred_material_name = old_material.getName() self.setActiveMaterial(self._updateMaterialContainer(self._global_container_stack.getBottom(), containers[0], preferred_material_name).id) else: Logger.log("w", "While trying to set the active variant, no variant was found to replace.") @pyqtSlot(str) def setActiveQuality(self, quality_id): containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = quality_id) if not containers or not self._global_container_stack: return Logger.log("d", "Attempting to change the active quality to %s", quality_id) self.blurSettings.emit() quality_container = None quality_changes_container = self._empty_quality_changes_container container_type = containers[0].getMetaDataEntry("type") if container_type == "quality": quality_container = containers[0] elif container_type == "quality_changes": quality_changes_container = containers[0] containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers( quality_type = quality_changes_container.getMetaDataEntry("quality")) if not containers: Logger.log("e", "Could not find quality %s for changes %s, not changing quality", quality_changes_container.getMetaDataEntry("quality"), quality_changes_container.getId()) return quality_container = containers[0] else: Logger.log("e", "Tried to set quality to a container that is not of the right type") return quality_type = quality_container.getMetaDataEntry("quality_type") if not quality_type: quality_type = quality_changes_container.getName() for stack in ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks(): extruder_id = stack.getId() if stack != self._global_container_stack else None criteria = { "quality_type": quality_type, "extruder": extruder_id } material = stack.findContainer(type = "material") if material and material is not 
self._empty_material_container: criteria["material"] = material.getId() if self._global_container_stack.getMetaDataEntry("has_machine_quality"): criteria["definition"] = self.activeQualityDefinitionId else: criteria["definition"] = "fdmprinter" stack_quality = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**criteria) if not stack_quality: criteria.pop("extruder") stack_quality = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**criteria) if not stack_quality: stack_quality = quality_container else: stack_quality = stack_quality[0] else: stack_quality = stack_quality[0] if quality_changes_container != self._empty_quality_changes_container: stack_quality_changes = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(name = quality_changes_container.getName(), extruder = extruder_id)[0] else: stack_quality_changes = self._empty_quality_changes_container old_quality = stack.findContainer(type = "quality") if old_quality: old_quality.nameChanged.disconnect(self._onQualityNameChanged) else: Logger.log("w", "Could not find old quality while changing active quality.") old_changes = stack.findContainer(type = "quality_changes") if old_changes: old_changes.nameChanged.disconnect(self._onQualityNameChanged) else: Logger.log("w", "Could not find old quality_changes while changing active quality.") stack.replaceContainer(stack.getContainerIndex(old_quality), stack_quality) stack.replaceContainer(stack.getContainerIndex(old_changes), stack_quality_changes) stack_quality.nameChanged.connect(self._onQualityNameChanged) stack_quality_changes.nameChanged.connect(self._onQualityNameChanged) if self.hasUserSettings and Preferences.getInstance().getValue("cura/active_mode") == 1: # Ask the user if the user profile should be cleared or not (discarding the current settings) # In Simple Mode we assume the user always wants to keep the (limited) current settings details = catalog.i18nc("@label", "You made changes to the following setting(s):") user_settings = self._active_container_stack.getTop().findInstances(**{}) for setting in user_settings: details = details + "\n " + setting.definition.label Application.getInstance().messageBox(catalog.i18nc("@window:title", "Switched profiles"), catalog.i18nc("@label", "Do you want to transfer your changed settings to this profile?"), catalog.i18nc("@label", "If you transfer your settings they will override settings in the profile."), details, buttons = QMessageBox.Yes + QMessageBox.No, icon = QMessageBox.Question, callback = self._keepUserSettingsDialogCallback) self.activeQualityChanged.emit() def _keepUserSettingsDialogCallback(self, button): if button == QMessageBox.Yes: # Yes, keep the settings in the user profile with this profile pass elif button == QMessageBox.No: # No, discard the settings in the user profile global_stack = Application.getInstance().getGlobalContainerStack() for extruder in ExtruderManager.getInstance().getMachineExtruders(global_stack.getId()): extruder.getTop().clear() global_stack.getTop().clear() @pyqtProperty(str, notify = activeVariantChanged) def activeVariantName(self): if self._active_container_stack: variant = self._active_container_stack.findContainer({"type": "variant"}) if variant: return variant.getName() return "" @pyqtProperty(str, notify = activeVariantChanged) def activeVariantId(self): if self._active_container_stack: variant = self._active_container_stack.findContainer({"type": "variant"}) if variant: return variant.getId() return "" @pyqtProperty(str, notify = 
globalContainerChanged) def activeDefinitionId(self): if self._global_container_stack: definition = self._global_container_stack.getBottom() if definition: return definition.id return "" ## Get the Definition ID to use to select quality profiles for the currently active machine # \returns DefinitionID (string) if found, empty string otherwise # \sa getQualityDefinitionId @pyqtProperty(str, notify = globalContainerChanged) def activeQualityDefinitionId(self): if self._global_container_stack: return self.getQualityDefinitionId(self._global_container_stack.getBottom()) return "" ## Get the Definition ID to use to select quality profiles for machines of the specified definition # This is normally the id of the definition itself, but machines can specify a different definition to inherit qualities from # \param definition (DefinitionContainer) machine definition # \returns DefinitionID (string) if found, empty string otherwise def getQualityDefinitionId(self, definition): definition_id = definition.getMetaDataEntry("quality_definition") if not definition_id: definition_id = definition.getId() return definition_id ## Get the Variant ID to use to select quality profiles for the currently active variant # \returns VariantID (string) if found, empty string otherwise # \sa getQualityVariantId @pyqtProperty(str, notify = activeVariantChanged) def activeQualityVariantId(self): if self._global_container_stack: variant = self._global_container_stack.findContainer({"type": "variant"}) if variant: return self.getQualityVariantId(self._global_container_stack.getBottom(), variant) return "" ## Get the Variant ID to use to select quality profiles for variants of the specified definitions # This is normally the id of the variant itself, but machines can specify a different definition # to inherit qualities from, which has consequences for the variant to use as well # \param definition (DefinitionContainer) machine definition # \param variant (DefinitionContainer) variant definition # \returns VariantID (string) if found, empty string otherwise def getQualityVariantId(self, definition, variant): variant_id = variant.getId() definition_id = definition.getId() quality_definition_id = self.getQualityDefinitionId(definition) if definition_id != quality_definition_id: variant_id = variant_id.replace(definition_id, quality_definition_id, 1) return variant_id ## Gets how the active definition calls variants # Caveat: per-definition-variant-title is currently not translated (though the fallback is) @pyqtProperty(str, notify = globalContainerChanged) def activeDefinitionVariantsName(self): fallback_title = catalog.i18nc("@label", "Nozzle") if self._global_container_stack: return self._global_container_stack.getBottom().getMetaDataEntry("variants_name", fallback_title) return fallback_title @pyqtSlot(str, str) def renameMachine(self, machine_id, new_name): containers = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id = machine_id) if containers: new_name = self._createUniqueName("machine", containers[0].getName(), new_name, containers[0].getBottom().getName()) containers[0].setName(new_name) self.globalContainerChanged.emit() @pyqtSlot(str) def removeMachine(self, machine_id): # If the machine that is being removed is the currently active machine, set another machine as the active machine. 
activate_new_machine = (self._global_container_stack and self._global_container_stack.getId() == machine_id) ExtruderManager.getInstance().removeMachineExtruders(machine_id) containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "user", machine = machine_id) for container in containers: UM.Settings.ContainerRegistry.getInstance().removeContainer(container.getId()) UM.Settings.ContainerRegistry.getInstance().removeContainer(machine_id) if activate_new_machine: stacks = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(type = "machine") if stacks: Application.getInstance().setGlobalContainerStack(stacks[0]) @pyqtProperty(bool, notify = globalContainerChanged) def hasMaterials(self): if self._global_container_stack: return bool(self._global_container_stack.getMetaDataEntry("has_materials", False)) return False @pyqtProperty(bool, notify = globalContainerChanged) def hasVariants(self): if self._global_container_stack: return bool(self._global_container_stack.getMetaDataEntry("has_variants", False)) return False ## Property to indicate if a machine has "specialized" material profiles. # Some machines have their own material profiles that "override" the default catch all profiles. @pyqtProperty(bool, notify = globalContainerChanged) def filterMaterialsByMachine(self): if self._global_container_stack: return bool(self._global_container_stack.getMetaDataEntry("has_machine_materials", False)) return False ## Property to indicate if a machine has "specialized" quality profiles. # Some machines have their own quality profiles that "override" the default catch all profiles. @pyqtProperty(bool, notify = globalContainerChanged) def filterQualityByMachine(self): if self._global_container_stack: return bool(self._global_container_stack.getMetaDataEntry("has_machine_quality", False)) return False ## Get the Definition ID of a machine (specified by ID) # \param machine_id string machine id to get the definition ID of # \returns DefinitionID (string) if found, None otherwise @pyqtSlot(str, result = str) def getDefinitionByMachineId(self, machine_id): containers = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id=machine_id) if containers: return containers[0].getBottom().getId() @staticmethod def createMachineManager(engine=None, script_engine=None): return MachineManager() def _updateVariantContainer(self, definition): if not definition.getMetaDataEntry("has_variants"): return self._empty_variant_container containers = [] preferred_variant = definition.getMetaDataEntry("preferred_variant") if preferred_variant: containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "variant", definition = definition.id, id = preferred_variant) if not containers: containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "variant", definition = definition.id) if containers: return containers[0] return self._empty_variant_container def _updateMaterialContainer(self, definition, variant_container = None, preferred_material_name = None): if not definition.getMetaDataEntry("has_materials"): return self._empty_material_container search_criteria = { "type": "material" } if definition.getMetaDataEntry("has_machine_materials"): search_criteria["definition"] = self.getQualityDefinitionId(definition) if definition.getMetaDataEntry("has_variants") and variant_container: search_criteria["variant"] = self.getQualityVariantId(definition, variant_container) else: search_criteria["definition"] = "fdmprinter" if 
preferred_material_name: search_criteria["name"] = preferred_material_name else: preferred_material = definition.getMetaDataEntry("preferred_material") if preferred_material: search_criteria["id"] = preferred_material containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria) if containers: return containers[0] containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria) if "variant" in search_criteria or "id" in search_criteria: # If a material by this name can not be found, try a wider set of search criteria search_criteria.pop("variant", None) search_criteria.pop("id", None) containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria) if containers: return containers[0] Logger.log("w", "Unable to find a material container with provided criteria, returning an empty one instead.") return self._empty_material_container def _updateQualityContainer(self, definition, variant_container, material_container = None, preferred_quality_name = None): container_registry = UM.Settings.ContainerRegistry.getInstance() search_criteria = { "type": "quality" } if definition.getMetaDataEntry("has_machine_quality"): search_criteria["definition"] = self.getQualityDefinitionId(definition) if definition.getMetaDataEntry("has_materials") and material_container: search_criteria["material"] = material_container.id else: search_criteria["definition"] = "fdmprinter" if preferred_quality_name and preferred_quality_name != "empty": search_criteria["name"] = preferred_quality_name else: preferred_quality = definition.getMetaDataEntry("preferred_quality") if preferred_quality: search_criteria["id"] = preferred_quality containers = container_registry.findInstanceContainers(**search_criteria) if containers: return containers[0] if "material" in search_criteria: # First check if we can solve our material not found problem by checking if we can find quality containers # that are assigned to the parents of this material profile. try: inherited_files = material_container.getInheritedFiles() except AttributeError: # Material_container does not support inheritance. inherited_files = [] if inherited_files: for inherited_file in inherited_files: # Extract the ID from the path we used to load the file. search_criteria["material"] = os.path.basename(inherited_file).split(".")[0] containers = container_registry.findInstanceContainers(**search_criteria) if containers: return containers[0] # We still weren't able to find a quality for this specific material. # Try to find qualities for a generic version of the material. 
material_search_criteria = { "type": "material", "material": material_container.getMetaDataEntry("material"), "color_name": "Generic"} if definition.getMetaDataEntry("has_machine_quality"): if material_container: material_search_criteria["definition"] = material_container.getDefinition().id if definition.getMetaDataEntry("has_variants"): material_search_criteria["variant"] = material_container.getMetaDataEntry("variant") else: material_search_criteria["definition"] = self.getQualityDefinitionId(definition) if definition.getMetaDataEntry("has_variants") and variant_container: material_search_criteria["variant"] = self.getQualityVariantId(definition, variant_container) else: material_search_criteria["definition"] = "fdmprinter" material_containers = container_registry.findInstanceContainers(**material_search_criteria) if material_containers: search_criteria["material"] = material_containers[0].getId() containers = container_registry.findInstanceContainers(**search_criteria) if containers: return containers[0] if "name" in search_criteria or "id" in search_criteria: # If a quality by this name can not be found, try a wider set of search criteria search_criteria.pop("name", None) search_criteria.pop("id", None) containers = container_registry.findInstanceContainers(**search_criteria) if containers: return containers[0] # Notify user that we were unable to find a matching quality message = Message(catalog.i18nc("@info:status", "Unable to find a quality profile for this combination. Default settings will be used instead.")) message.show() return self._empty_quality_container ## Finds a quality-changes container to use if any other container # changes. # # \param quality_type The quality type to find a quality-changes for. # \param preferred_quality_changes_name The name of the quality-changes to # pick, if any such quality-changes profile is available. def _updateQualityChangesContainer(self, quality_type, preferred_quality_changes_name = None): container_registry = UM.Settings.ContainerRegistry.getInstance() # Cache. search_criteria = { "type": "quality_changes" } search_criteria["quality"] = quality_type if preferred_quality_changes_name: search_criteria["name"] = preferred_quality_changes_name # Try to search with the name in the criteria first, since we prefer to have the correct name. containers = container_registry.findInstanceContainers(**search_criteria) if containers: # Found one! return containers[0] if "name" in search_criteria: del search_criteria["name"] # Not found, then drop the name requirement (if we had one) and search again. containers = container_registry.findInstanceContainers(**search_criteria) if containers: return containers[0] return self._empty_quality_changes_container # Didn't find anything with the required quality_type. def _onMachineNameChanged(self): self.globalContainerChanged.emit() def _onMaterialNameChanged(self): self.activeMaterialChanged.emit() def _onQualityNameChanged(self): self.activeQualityChanged.emit()
agpl-3.0
-7,270,229,336,275,078,000
49.012948
192
0.669548
false
hustbeta/openstack-juno-api-adventure
examples/nova/v2/11_create_server.py
1
1380
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import datetime
import json

import keystoneclient
import keystoneclient.auth.identity.v3
import keystoneclient.session
import keystoneclient.v3.client
import novaclient.client

import local_settings


auth = keystoneclient.auth.identity.v3.Password(auth_url=local_settings.auth_url_v3,
                                                username=local_settings.username,
                                                password=local_settings.password,
                                                user_domain_name='Default',
                                                project_domain_name='Default',
                                                project_name=local_settings.tenant_name)
session = keystoneclient.session.Session(auth=auth)
nova = novaclient.client.Client('2', session=session)

server = nova.servers.create(name='test-' + datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
                             min_count=2,
                             image='397ceee8-ee08-4919-b163-d10c20b42029',
                             flavor='465c0d60-b4f9-4adb-ba68-a6a4ec9a835d',
                             meta={'description': 'fdsfsdfsdf'},
                             availability_zone='pulsar',
                             nics=[{'net-id': '2d2784cb-3e40-4db4-b54b-d731290810c6'}])
print json.dumps(server.to_dict())
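# A minimal follow-up sketch: nova.servers.create() returns as soon as the request
# is accepted, so the returned server is usually still in BUILD state. Assuming one
# wants to wait until it leaves BUILD, a simple poll on nova.servers.get() works;
# the 5-second interval below is an arbitrary illustration value, not from the
# original script.
import time

while server.status == 'BUILD':
    time.sleep(5)
    server = nova.servers.get(server.id)  # refresh the server object from the API
print server.status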
mit
-8,997,592,995,425,463,000
43.516129
93
0.544203
false
Foggalong/scraps
files/cc/game.py
1
2715
#!/usr/bin/python3

import time
from random import randint

# Game Vars

# Health
hvar = 100
svar = 100

# Fight
# stats not final

# player
# weapons
weaponls = ['bare hands', 'sword', 'axe', 'staff']
wpninpackls = [1, 1, 1, 0]
wpnhealthls = [100, 20, 30, 50]
wpndamagels = [5, 7, 10, 20]
wpnchancels = [8, 7, 5, 6]

# monsters
monsterls = ['goblin', 'troll', 'warlock']
monhealthls = [10, 20, 50]
mondamagels = [5, 10, 15]
monchancels = [2, 5, 8]  # value out of ten
#/Fight


class funct:
    def info(self, item):
        if item in monsterls:
            print("Name:", item)
            print("Type: monster")
            print("Health:", monhealthls[monsterls.index(item)])
            print("Damage:", mondamagels[monsterls.index(item)])
            print("Chance:", monchancels[monsterls.index(item)])
        elif item in weaponls:
            print("Name:", item)
            print("Type: weapon")
            print("Health:", wpnhealthls[weaponls.index(item)])
            print("Damage:", wpndamagels[weaponls.index(item)])
            print("Chance:", wpnchancels[weaponls.index(item)])
        else:
            print("No information could be found.")

    def fight(self, monster):
        global hvar
        ind = monsterls.index(monster)
        monhealth, mondamage, monchance = monhealthls[ind], mondamagels[ind], monchancels[ind]
        run = 1
        while run == 1:
            action = input("\n> ")
            # if 'attack' in action:
            # any(word in str1 for word in weapon)
            if action == 'fight':
                roll = randint(0, 10)
                if roll > monchance:
                    monhealth -= 7
                    print("You landed a blow!")
                elif roll == monchance:
                    print("You and the", monster, "clashed!")
                elif roll < monchance:
                    print("The", monster, "landed a blow!")
                    hvar -= mondamage
                if monhealth < 1 or hvar < 1:
                    if monhealth < 1:
                        print("You killed the "+monster+"!\n")
                    elif hvar < 1:
                        print("The "+monster+" killed you!\n")
                    break
            elif action == 'battle info':
                print("Your health:", hvar)
                print(monster+"'s health:", monhealth, "\n")
            elif action.split()[0] == 'info':
                try:
                    funct.info(self, action.split()[1])
                except:
                    print("Information about what?")


monster = monsterls[randint(0, len(monsterls)-1)]
action = input("A wild "+monster+" appears!\n> ")
if action == 'fight':
    funct.fight(funct, monster)
elif action == 'run':
    print("You ran away from the "+monster+"! WUSS!")

print("\nDebug died!")
print("Program fin.\nIt will close in\n1 minute.")
time.sleep(60)

# A nice example of classes
"""
Paul 12
>>> class tut:
...     def name(self, name):
...         print(name, age)
...     age = 12
...
>>> tut.name("Paul")
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: name() takes exactly 2 arguments (1 given)
>>> tut.name(tut, "Paul")
Paul 12
>>>
"""
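# A quick way to exercise the helpers without playing through the prompt loop
# (a sketch only, assuming the stat tables above are unchanged; the class is
# used unbound on purpose, mirroring the funct.fight(funct, monster) call above):
#
#     funct.info(funct, 'goblin')   # prints the goblin's stat block
#     funct.info(funct, 'sword')    # prints the sword's stat block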
gpl-2.0
-3,856,779,248,668,732,000
22.617391
88
0.624309
false
lavish/drs
robot/ev3dev_utils.py
1
3416
import time, ev3dev


def run_for(motor, power=75, ever=None, seconds=None, degrees=None):
    """
    Run motor for specified amount of seconds, degrees, or forever

    Examples:
    run_for(motor, ever=True)
    run_for(motor, seconds=0.5)
    run_for(motor, degrees=270, power=100)

    Power is specified in percents in the range of [-100; 100].

    In case the motor is in regulation mode, the power value is used to
    compute the pulses_per_second value. The upper limits for
    pulses_per_second are assumed to be 900 and 1200 for tacho and
    minitacho motors accordingly.
    """
    #motor.regulation_mode = ev3dev.motor.mode_on
    if motor.regulation_mode == ev3dev.motor.mode_on:
        motor.pulses_per_second_setpoint = int(power)
    else:
        motor.duty_cycle_setpoint = int(power)

    if ever is not None:
        motor.run_mode = ev3dev.motor.run_mode_forever
    elif seconds is not None:
        motor.run_mode = ev3dev.motor.run_mode_time
        motor.time_setpoint = int(seconds * 1000)
    elif degrees is not None:
        motor.run_mode = ev3dev.motor.run_mode_position
        motor.position_mode = ev3dev.motor.position_mode_relative
        motor.position = 0
        motor.position_setpoint = int(degrees)

    motor.run()


def run_until(motor, power=75, degrees=None, check=None):
    """
    Run motor until specified position or until check() evaluates to True.

    Examples:
    run_until(motor, degrees=270, power=40)
    run_until(motor, check=lambda: touch_sensor.value())

    Power is specified in percents in the range of [-100; 100].

    In case the motor is in regulation mode, the power value is used to
    compute the pulses_per_second value. The upper limits for
    pulses_per_second are assumed to be 900 and 1200 for tacho and
    minitacho motors accordingly.
    """
    if motor.regulation_mode == ev3dev.motor.mode_on:
        if motor.type() == 'tacho':
            motor.pulses_per_second_setpoint = int(power * 9)
        elif motor.type() == 'minitacho':
            motor.pulses_per_second_setpoint = int(power * 12)
    else:
        motor.duty_cycle_setpoint = int(power)

    if degrees is not None:
        motor.run_mode = ev3dev.motor.run_mode_position
        motor.position_mode = ev3dev.motor.position_mode_absolute
        motor.position_setpoint = int(degrees)
    else:
        motor.run_mode = ev3dev.motor.run_mode_forever

    motor.run()

    while True:
        if degrees is not None:
            if not motor.running():
                break
        elif check():
            motor.stop()
            break


def drive_for(left_motor, right_motor, direction=0, power=75, ever=None, seconds=None):
    """
    Run both motors for a specified amount of seconds, or forever.

    The direction parameter is in range [-100, 100] and specifies how fast
    the robot should turn.

    direction = -100: turn left as fast as possible,
    direction =    0: drive forward,
    direction =  100: turn right as fast as possible.

    The motor on the outer arc is driven at full power (specified as 'power'
    parameter), and the inner motor power is computed accordingly.
    """
    if (direction >= 0):
        master = left_motor
        slave = right_motor
    else:
        master = right_motor
        slave = left_motor

    mpower = power
    spower = power * (50 - abs(direction)) / 50

    run_for(master, mpower, ever, seconds)
    run_for(slave, spower, ever, seconds)
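# Minimal usage sketch, assuming `left` and `right` are already-constructed
# ev3dev motor objects for the two drive wheels (how they are obtained depends
# on the ev3dev binding version in use, so it is left out here):
#
#     run_for(left, power=60, seconds=2)              # spin one wheel for 2 s
#     run_until(left, power=40, degrees=360)          # run to absolute position 360
#     drive_for(left, right, direction=25, power=75,  # gentle right-hand arc
#               ever=True)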
mit
-6,067,089,681,917,996,000
34.583333
87
0.657201
false
ZeroQI/Hama.bundle
Contents/Code/common.py
1
55275
### common ### # https://www.python.org/dev/peps/pep-0008/ # Usage: "common.GetPosters" = "from common import GetPosters" ### Imports ### ### Functions used ### # Python Modules # import os # path.abspath, join, dirname import time # datetime.datetime.now() import re # sub import logging # import datetime # datetime.now import ssl, urllib2 # urlopen import unicodedata # import StringIO, gzip # from string import maketrans # maketrans import threading #local, tlocal = threading.local() #Log.Info('tlocal: {}'.format(dir(tlocal))) ### Variables ### PlexRoot = Core.app_support_path #if not os.path.isdir(PlexRoot): # path_location = { 'Windows': '%LOCALAPPDATA%\\Plex Media Server', # 'MacOSX': '$HOME/Library/Application Support/Plex Media Server', # 'Linux': '$PLEX_HOME/Library/Application Support/Plex Media Server' } # PlexRoot = os.path.expandvars(path_location[Platform.OS.lower()] if Platform.OS.lower() in path_location else '~') # Platform.OS: Windows, MacOSX, or Linux CachePath = os.path.join(PlexRoot, "Plug-in Support", "Data", "com.plexapp.agents.hama", "DataItems") downloaded = {'posters':0, 'art':0, 'seasons':0, 'banners':0, 'themes':0, 'thumbs': 0} netLock = Thread.Lock() netLocked = {} WEB_LINK = "<a href='%s' target='_blank'>%s</a>" TVDB_SERIE_URL = 'https://thetvdb.com/?tab=series&id=' # Used in error_log generation ANIDB_SERIE_URL = 'https://anidb.net/anime/' # Used in error_log generation DefaultPrefs = ("SerieLanguagePriority", "EpisodeLanguagePriority", "PosterLanguagePriority", "AnidbGenresMainOnly", "MinimumWeight", "adult", "OMDbApiKey") #"Simkl", FieldListMovies = ('original_title', 'title', 'title_sort', 'roles', 'studio', 'year', 'originally_available_at', 'tagline', 'summary', 'content_rating', 'content_rating_age', 'producers', 'directors', 'writers', 'countries', 'posters', 'art', 'themes', 'rating', 'quotes', 'trivia') FieldListSeries = ('title', 'title_sort', 'originally_available_at', 'duration','rating', 'reviews', 'collections', 'genres', 'tags' , 'summary', 'extras', 'countries', 'rating_count', 'content_rating', 'studio', 'countries', 'posters', 'banners', 'art', 'themes', 'roles', 'original_title', 'rating_image', 'audience_rating', 'audience_rating_image') # Not in Framework guide 2.1.1, in https://github.com/plexinc-agents/TheMovieDb.bundle/blob/master/Contents/Code/__init__.py FieldListSeasons = ('summary','posters', 'art') #'summary', FieldListEpisodes = ('title', 'summary', 'originally_available_at', 'writers', 'directors', 'producers', 'guest_stars', 'rating', 'thumbs', 'duration', 'content_rating', 'content_rating_age', 'absolute_index') #'titleSort SourceList = ('AniDB', 'MyAnimeList', 'FanartTV', 'OMDb', 'TheTVDB', 'TheMovieDb', 'Plex', 'AnimeLists', 'tvdb4', 'TVTunes', 'Local', 'AniList') #"Simkl", Movie_to_Serie_US_rating = {"G" : "TV-Y7", "PG" : "TV-G", "PG-13": "TV-PG", "R" : "TV-14", "R+" : "TV-MA", "Rx" : "NC-17"} COMMON_HEADERS = {'User-agent': 'Plex/HAMA', 'Content-type': 'application/json'} THROTTLE = {} ### Plex Library XML ### PLEX_LIBRARY, PLEX_LIBRARY_URL = {}, "http://localhost:32400/library/sections/" # Allow to get the library name to get a log per library https://support.plex.tv/hc/en-us/articles/204059436-Finding-your-account-token-X-Plex-Token def GetPlexLibraries(): try: library_xml = XML.ElementFromURL(PLEX_LIBRARY_URL, cacheTime=0, timeout=float(30), headers={"X-Plex-Token": os.environ['PLEXTOKEN']}) PLEX_LIBRARY.clear() Log.Root('Libraries: ') for directory in library_xml.iterchildren('Directory'): for location in directory: if 
directory.get("agent") == "com.plexapp.agents.hama": PLEX_LIBRARY[location.get("path")] = directory.get("title") # Only pull libraries that use HAMA to prevent miss identification Log.Root('[{}] id: {:>2}, type: {:<6}, agent: {:<30}, scanner: {:<30}, library: {:<24}, path: {}'.format('x' if directory.get("agent") == "com.plexapp.agents.hama" else ' ', directory.get("key"), directory.get('type'), directory.get("agent"), directory.get("scanner"), directory.get('title'), location.get("path"))) except Exception as e: Log.Root("PLEX_LIBRARY_URL - Exception: '{}'".format(e)) ### Get media directory ### def GetMediaDir(media, movie, file=False): if movie: return media.items[0].parts[0].file if file else os.path.dirname(media.items[0].parts[0].file) else: for s in media.seasons if media else []: # TV_Show: for e in media.seasons[s].episodes: return media.seasons[s].episodes[e].items[0].parts[0].file if file else os.path.dirname(media.seasons[s].episodes[e].items[0].parts[0].file) ### Get media root folder ### def GetLibraryRootPath(dir, repull_libraries=True): roots_found, library, root, path = [], '', '', '' for root in [os.sep.join(dir.split(os.sep)[0:x+2]) for x in range(0, dir.count(os.sep))]: if root in PLEX_LIBRARY: roots_found.append(root) if len(roots_found) > 0: root = max(roots_found) library = PLEX_LIBRARY[root] path = os.path.relpath(dir, root) else: if repull_libraries: GetPlexLibraries() # Repull library listings as if a library was created while HAMA was already running, it would not be known library, root, path = GetLibraryRootPath(dir, repull_libraries=False) # Try again but don't repull libraries as it will get stuck in an infinite loop else: path, root = '_unknown_folder', '' return library, root, path class PlexLog(object): ''' Logging class to join scanner and agent logging per serie Usage Scanner: (not used currently in scanner as independant from Hama) - from "../../Plug-ins/Hama.bundle/Contents/code/common" import PlexLog - log = PlexLog(file='root/folder/[anidb2-xxxx].log', isAgent=False) Usage Agent: - log = common.PlexLog(file='mytest.log', isAgent=True ) - log.debug('some debug message: %s', 'test123') ''' def Logger (self): logger = logging.getLogger(hex(threading.currentThread().ident)) return logger if logger.handlers else logging.getLogger('com.plexapp.agents.hama') def Root (self, msg, *args, **kwargs): logging.getLogger('com.plexapp.agents.hama').debug(msg, *args, **kwargs) def Debug (self, msg, *args, **kwargs): self.Logger().debug (msg, *args, **kwargs) def Info (self, msg, *args, **kwargs): self.Logger().info (msg, *args, **kwargs) def Warning (self, msg, *args, **kwargs): self.Logger().warning (msg, *args, **kwargs) def Error (self, msg, *args, **kwargs): self.Logger().error ("ERROR: {}".format(msg), *args, **kwargs); self.Root("ERROR: {}".format(msg)) def Critical (self, msg, *args, **kwargs): self.Logger().critical("FATAL: {}".format(msg), *args, **kwargs); self.Root("FATAL: {}".format(msg)) def Open (self, media=None, movie=False, search=False, isAgent=True, log_format='%(message)s', file="", mode='w', maxBytes=4*1024*1024, backupCount=5, encoding=None, delay=False, enable_debug=True): if not file: library, root, path = GetLibraryRootPath(GetMediaDir(media, movie))#Get movie or serie episode folder location mode = 'a' if path in ('_unknown_folder', '_root_') else 'w' #Logs folder for char in list("\\/:*?<>|~;"): # remove leftover parenthesis (work with code a bit above) if char in library: library = library.replace(char, '-') # translate anidb apostrophes 
into normal ones LOGS_PATH = os.path.join(CachePath, '_Logs', library) if not os.path.exists(LOGS_PATH): os.makedirs(LOGS_PATH); self.Debug("[!] folder: '{}'created".format(LOGS_PATH)) if path=='' and root: path='_root_' filename = path.split(os.sep, 1)[0]+'.agent-search.log' if search else path.split(os.sep, 1)[0]+'.agent-update.log' file = os.path.join(LOGS_PATH, filename) try: log = logging.getLogger(hex(threading.currentThread().ident)) # update thread's logging handler for handler in log.handlers: log.removeHandler(handler) # remove all old handlers handler_new = logging.FileHandler(file, mode=mode or 'w', encoding=encoding, delay=delay) handler_new.setFormatter(logging.Formatter(log_format)) # Set log format log.addHandler(handler_new) log.setLevel(logging.DEBUG if enable_debug else logging.INFO) # update level log = logging.getLogger('com.plexapp.agents.hama') # update hama root's logging handler library_log = os.path.join(LOGS_PATH, '_root_.agent.log') if library_log not in [handler.baseFilename for handler in log.handlers if hasattr(handler, 'baseFilename')]: for handler in log.handlers: if hasattr(handler, 'baseFilename') and os.path.join(CachePath, '_Logs') in handler.baseFilename: log.removeHandler(handler) handler_new = logging.handlers.RotatingFileHandler(library_log, mode='a', maxBytes=4*1024*1024, backupCount=1, encoding=encoding, delay=delay) #handler_new = logging.FileHandler(library_log, mode='w', encoding=encoding, delay=delay) handler_new.setFormatter(logging.Formatter('%(asctime)-15s - %(thread)x - %(message)s')) # Set log format log.addHandler(handler_new) log.info('==== common.PlexLog(file="{}")'.format(file)) except IOError as e: self.isAgent = isAgent; logging.getLogger('com.plexapp.agents.hama').info('updateLoggingConfig: failed to set logfile: {}'.format(e)) self.Info("".ljust(157, '=')) self.Info('common.PlexLog(file="{}", movie={})'.format(file, movie)) self.Info('[!] 
file: "{}"'.format(GetMediaDir(media, movie, True))) self.Info('[ ] library: "{}"'.format(library)) self.Info('[ ] root: "{}"'.format(root)) self.Info('[ ] path: "{}"'.format(path)) self.Info('[ ] Plex root: "{}"'.format(PlexRoot)) self.Info('[ ] Log folder: "{}"'.format(os.path.relpath(LOGS_PATH, PlexRoot))) self.Info('[ ] Log file: "{}"'.format(filename)) self.Info('[ ] Logger: "{}"'.format(hex(threading.currentThread().ident))) self.Info('[ ] mode: "{}"'.format(mode)) self.isAgent = isAgent def Close (self): log = logging.getLogger(hex(threading.currentThread().ident)) # update root logging's handler for handler in log.handlers: log.removeHandler(handler) Log = PlexLog() ### Code reduction one-liners that get imported specifically ### #def GetMeta (source="", field="" ): return (downloaded[field]<=1) and (not source or source in Prefs['posters' if field=='seasons' else field]) and not Prefs['posters' if field=='seasons' else field]=="None" def GetXml (xml, field ): return xml.xpath(field)[0].text if xml.xpath(field) and xml.xpath(field)[0].text not in (None, '', 'N/A', 'null') else '' #allow isdigit() checks def urlFilename (url ): return "/".join(url.split('/')[3:]) def urlDomain (url ): return "/".join(url.split('/')[:3]) def natural_sort_key(s ): return [int(text) if text.isdigit() else text for text in re.split(r'([0-9]+)', str(s).lower())] # list.sort(key=natural_sort_key) #sorted(list, key=natural_sort_key) - Turn a string into string list of chunks "z23a" -> ["z", 23, "a"] def replaceList (string, a, b, *args): for index in a: string.replace(a[index], b[index], *args) return string def LevenshteinRatio(first, second): return 100 - int(100 * LevenshteinDistance(first, second) / float(max(len(first), len(second)))) if len(first)*len(second) else 0 def LevenshteinDistance(first, second): """ Compute Levenshtein distance """ if len(first) > len(second): first, second = second, first if len(second) == 0: return len(first) first_length = len(first ) + 1 second_length = len(second) + 1 distance_matrix = [[0] * second_length for x in range(first_length)] for i in range(first_length): distance_matrix[i][0] = i for j in range(second_length): distance_matrix[0][j] = j for i in xrange(1, first_length): for j in range(1, second_length): distance_matrix[i][j] = min(distance_matrix[i][j-1]+1, distance_matrix[i-1][j]+1, distance_matrix[i-1][j-1] + (1 if first[i-1] != second[j-1] else 0)) return distance_matrix[first_length-1][second_length-1] def IsIndex(var, index): #Avoid TypeError: argument of type 'NoneType' is not iterable """ Return the length of the array or index no errors """ try: return var[index] except: return '' def Dict(var, *arg, **kwarg): """ Return the value of an (imbricated) dictionnary, if all fields exist else return "" unless "default=new_value" specified as end argument Avoid TypeError: argument of type 'NoneType' is not iterable Ex: Dict(variable_dict, 'field1', 'field2', default = 0) """ for key in arg: if isinstance(var, dict) and key and key in var: var = var[key] else: return kwarg['default'] if kwarg and 'default' in kwarg else "" # Allow Dict(var, tvdbid).isdigit() for example return kwarg['default'] if var in (None, '', 'N/A', 'null') and kwarg and 'default' in kwarg else "" if var in (None, '', 'N/A', 'null') else var def SaveDict(value, var, *arg): """ Save non empty value to a (nested) Dictionary fields unless value is a list or dict for which it will extend it instead # ex: SaveDict(GetXml(ep, 'Rating'), TheTVDB_dict, 'seasons', season, 'episodes', episode, 
'rating') # ex: SaveDict(Dict(TheTVDB_dict, 'title'), TheTVDB_dict, 'title_sort') # ex: SaveDict(genre1, TheTVDB_dict, genre) to add to current list # ex: SaveDict([genre1, genre2], TheTVDB_dict, genre) to extend to current list """ if not value and value!=0: return "" # update dict only as string would revert to pre call value being immutable if not arg and (isinstance(var, list) or isinstance(var, dict)): if not (isinstance(var, list) or isinstance(var, dict)): var = value elif isinstance(value, list) or isinstance(value, dict): var.extend (value) else: var.append (value) return value for key in arg[:-1]: if not isinstance(var, dict): return "" if not key in var: var[key] = {} var = var[key] if not arg[-1] in var or not isinstance(var[arg[-1]], list): var[arg[-1]] = value elif isinstance(value, list) or isinstance(value, dict): var[arg[-1]].extend (value) else: var[arg[-1]].append (value) return value ### import var 2 dict into var and returns it def UpdateDict(var, var2): var.update(var2); return var def DictString(input_value, max_depth, initial_indent=0, depth=0): """ Expand a dict down to 'max_depth' and sort the keys. To print it on a single line with this function use (max_depth=0). EX: (max_depth=1) mappingList: { 'season_map': {'13493': {'max': '3', 'min': '3'}}} EX: (max_depth=2) mappingList: { 'season_map': { '9306': {'max': '2', 'min': '1'}, '11665': {'max': '3', 'min': '3'}}} """ output = "" indent = "\n" + " " * initial_indent + " " * (depth+1) if depth >= max_depth or not isinstance(input_value, dict): if isinstance(input_value, list) and depth<max_depth: output += "[" + indent + indent.join([("'{}'," if isinstance(x, str) else "{},").format(x) for x in input_value])[:-1] + "]" elif isinstance(input_value, dict): for i, key in enumerate(sorted(input_value, key=natural_sort_key)): output += ( "{}: ".format("'{}'".format(key.replace("'", "\\'")) if isinstance(key, basestring) else key) + "{}".format("'{}'".format(input_value[key].replace("'", "\\'").replace("\n", "\\n").replace("\r", "\\r")) if isinstance(input_value[key], basestring) else input_value[key]) + (", " if i!=len(input_value)-1 else "")) # remove last ',' output = "{" + output + "}" else: output += "{}".format(input_value) else: for i, key in enumerate(sorted(input_value, key=natural_sort_key)): value = input_value[key] if isinstance(input_value[key], basestring) else DictString(input_value[key], max_depth, initial_indent, depth+1) output += ( indent + "{}: ".format("'{}'".format(key.replace("'", "\\'")) if isinstance(key, basestring) else key) + "{}".format("'{}'".format(value.replace("'", "\\'").replace("\n", "\\n").replace("\r", "\\r")) if isinstance(input_value[key], basestring) else value) + ("," if i!=len(input_value)-1 else "")) # remove last ',' output = "{" + output + "}" return output # Other options passed on as can't define expansion depth #import pprint; pprint.pprint(input_value) #import json; return json.dumps(input_value, indent=2, sort_keys=True) def ssl_open(url, headers={}, timeout=20): ''' SSLV3_ALERT_HANDSHAKE_FAILURE 1. Do not verify certificates. A bit like how older Python versions worked Import ssl and urllib2 Use urllib2 with a default ssl context (which does not verify the certificate). Or: 2. 
Set PlexPluginCodePolicy to Elevated in Info.plist Add external Python libraries to your project bundle Import certifi and requests into your Python code Use requests ''' headers = UpdateDict(headers, COMMON_HEADERS) return urllib2.urlopen(urllib2.Request(url, headers=headers), context=ssl.SSLContext(ssl.PROTOCOL_SSLv23), timeout=timeout).read() def GetStatusCode(url): """ This function retreives the status code of a website by requesting HEAD data only from the host. This means that it only requests the headers. If the host cannot be reached or something else goes wrong, it returns None instead. urllib.parse.quote(string, safe='/', encoding=None, errors=None) - string: string your trying to encode - safe: string contain characters to ignore. Defualt is '/' - encoding: type of encoding url is in. Default is utf-8 - errors: specifies how errors are handled. Default is 'strict' which throws a UnicodeEncodeError, I think. #host = "/".join(url.split('/', 3)[:-1]) #path = url.replace(" ", "%20").split('/', 3)[3] #Log.Info("host: '%s', path: '%s'" % (host, path)) """ try: request = urllib2.Request(url) #urllib.quote #urllib2.quote(url,':/') request.get_method = lambda: 'HEAD' return urllib2.urlopen(request).getcode() # if "Content-Type: audio/mpeg" in response.info(): Log.Info("Content-Type: audio/mpeg") except Exception as e: return str(e) def SaveFile(filename="", file="", relativeDirectory=""): ''' Save file to cache, Thanks Dingmatt for folder creation ability ''' relativeFilename = os.path.join (relativeDirectory, filename) relativeDirectory, filename = os.path.split(relativeFilename) #if os.sep in filename: fullpathDirectory = os.path.abspath(os.path.join(CachePath, relativeDirectory)) try: if not os.path.exists(fullpathDirectory): os.makedirs(fullpathDirectory) Data.Save(relativeFilename, file) except Exception as e: Log.Debug("common.SaveFile() - Exception: {exception}, relativeFilename: '{relativeFilename}', file: '{file}'".format(exception=e, relativeFilename=relativeFilename, file=file)) else: Log.Info ("common.SaveFile() - CachePath: '{path}', file: '{file}'".format(path=CachePath, file=relativeFilename)) def decompress(file): times = 0 try: while True: file = gzip.GzipFile(fileobj=StringIO.StringIO(file)).read() times += 1 except: pass if times > 0: Log.Root("Decompression times: {}".format(times)) return file # Return string or object if appropriate def ObjectFromFile(file=""): file = decompress(file) #TEXT file if isinstance(file, basestring): #XML if file.startswith('<?xml '): #if type(file).__name__ == '_Element' or isinstance(file, basestring) and file.startswith('<?xml '): try: return XML.ElementFromString(file, max_size=1024*1024*10) # Overide max size to 10mb from 5mb default except Exception as e: Log.Info("XML corrupted. Exception: {}".format(e)) try: return XML.ElementFromString(file.decode('utf-8','ignore').replace('\b', '').encode("utf-8")) except Exception as e2: Log.Info("XML still corrupted after normalization. Exception: {}".format(e2)); return #JSON elif file.startswith('{'): #Json try: return JSON.ObjectFromString(file, encoding=None) except Exception as e: Log.Info("JSON corrupted. 
Exception: {}".format(e)); return #Empty file elif file=="": Log.Info("Empty file"); return return file def LoadFileCache(filename="", relativeDirectory=""): ''' Load file in Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems (return file_object, file_age) ''' relativeFilename = os.path.join(relativeDirectory, filename) fullpathFilename = os.path.abspath(os.path.join(CachePath, relativeDirectory, filename)) if filename.endswith(".xml.gz"): filename = filename[:-3] #anidb title database # Load from disk if present file, file_age, file_object = None, None, None if Data.Exists(relativeFilename): try: file = Data.Load(relativeFilename) except: Log.Debug("common.LoadFileCache() - File cache locally but failed loading - file: {}".format(relativeFilename)) else: file_object = ObjectFromFile(file) if file_object: file_age = time.time() - os.stat(fullpathFilename).st_mtime else: Log.Info('common.LoadFileCache() - local file "{}" deleted as failed validity test - file: {}'.format(relativeFilename, file)) Data.Remove(relativeFilename) #DELETE CACHE AS CORRUPTED return file_object, file_age def throttle_count(index="", duration=0): if not index or index not in THROTTLE: return 0 now, removed = time.time(), 0 # Remove entries older than 1 hour for entry in THROTTLE[index][:]: if entry < now-duration: THROTTLE[index].remove(entry) removed += 1 else: break # First entry found under duration age so all others will also be under as well if removed: Log.Root("Throttle '{}' count reduced by '{}'".format(index, removed)) return len(THROTTLE[index]) def throttle_add(index=""): if index: if index not in THROTTLE: THROTTLE[index] = [] THROTTLE[index].append(time.time()) def LoadFile(filename="", relativeDirectory="", url="", headers={}, data=None, cache=CACHE_1DAY*6, sleep=0, throttle=["", 0, 0]): ''' Load file in Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems if cache time not passed ''' headers = UpdateDict(headers, COMMON_HEADERS) if filename.endswith(".gz"): filename = filename[:-3] # Remove and '.gz' from the local filename as it will be decompressed at pull # Load from disk if present file_object, file_age = LoadFileCache(filename, relativeDirectory) if file_object: Log.Debug("common.LoadFile() - File cached locally - Filename: '{file}', Age: '{age:.2f} days', Limit: '{limit} days', url: '{url}'".format(url=url, file=os.path.join(relativeDirectory, filename), age=file_age/CACHE_1DAY, limit=cache/CACHE_1DAY)) #File not cached OR cache older than passed cache age / adjusted AniDB age file_downloaded = None if not file_object or file_age > cache: # Check to see if we are at throttle max and needs to be put on hold # Done before lock is aquired to alow other threads to move forward while throttle[0]: # Only check if throttle index is defined count = throttle_count(throttle[0], throttle[1]) if count >= throttle[2]: Log.Root("Throttle max hit {}. 
Waiting 60 sec for headroom".format(throttle)) time.sleep(60) else: # Add in this pull into the throttle count and continue on throttle_add(throttle[0]) Log.Root("Throttle: '{}', Duration: {}, Count: {} of {}".format(throttle[0], throttle[1], count+1, throttle[2])) break # Thread lock aquire netLock.acquire() # Safeguard if netLock does not work as expected while 'LoadFile' in netLocked and netLocked['LoadFile'][0]: Log.Root("Waiting for lock: 'LoadFile'") time.sleep(1) netLocked['LoadFile'] = (True, int(time.time())) #Log.Root("Lock acquired: 'LoadFile'") # Download URL to memory, Plex cache to 1 day try: file_downloaded = HTTP.Request(url, headers=headers, data=data, timeout=60, cacheTime=CACHE_1DAY).content #'Accept-Encoding':'gzip' # Loaded with Plex cache, str prevent AttributeError: 'HTTPRequest' object has no attribute 'find', None if 'thetvdb' in url else if url.endswith(".gz"): file_downloaded = decompress(file_downloaded) except Exception as e: Log.Error("common.LoadFile() - issue loading url: '{}', filename: '{}', Headers: {}, Exception: '{}'".format(url, filename, headers, e)) # issue loading, but not AniDB banned as it returns "<error>Banned</error>" else: Log.Root("Downloaded URL '{}'".format(url)) # Sleeping after call completion to prevent ban time.sleep(sleep) # Safeguard if netLock does not work as expected netLocked['LoadFile'] = (False, 0) #Log.Root("Lock released: 'LoadFile'") # Thread lock release netLock.release() # Donwnloaded File checks and saving as cache #if str(file).startswith("<Element error at ") or file in ('<error>Banned</error>', '<error>aid Missing or Invalid</error>'): if file_downloaded: file_downloaded_object = ObjectFromFile(file_downloaded) if not file_downloaded_object: Log.Error('common.LoadFile() - File received but failed validity, file: "{}"'.format(file_downloaded)) elif url.endswith('.xml') and len(file_downloaded)<24: Log.Error('common.LoadFile() - File received too small (<24 bytes), file: "{}"'.format(file_downloaded)) elif file_downloaded.startswith("<error"): Log.Error('common.LoadFile() - Error response received, file: "{}"'.format(file_downloaded)); return file_downloaded_object else: SaveFile(filename, file_downloaded, relativeDirectory); return file_downloaded_object return file_object ### Download images and themes for Plex ############################################################################################################################### def metadata_download(metadata, metatype, url, filename="", num=99, url_thumbnail=None): if metatype==metadata.posters: string = "posters" elif metatype==metadata.art: string = "art" elif metatype==metadata.banners: string = "banners" elif metatype==metadata.themes: string = "themes" elif filename.startswith("TVDB/episodes/"): string = "thumbs" else: string = "seasons" if url in metatype: Log.Info("url: '%s', num: '%d', filename: '%s'*" % (url, num, filename)) else: file, status = None, "" try: if filename and Data.Exists(filename): status += ", Found locally"; file = Data.Load(filename) else: file = (ssl_open((url_thumbnail or url).replace('thetvdb.com', 'thetvdb.plexapp.com')) if 'thetvdb.com' in url else False) or ssl_open(url_thumbnail or url) if file: status += ", Downloaded and Saved locally"; SaveFile(filename, file) if file: metatype[ url ] = Proxy.Preview(file, sort_order=num) if url_thumbnail else Proxy.Media(file, sort_order=num) # or metatype[ url ] != proxy_item # proxy_item = except Exception as e: Log.Info("common.metadata_download() - Exception: {}, url: '{}', filename: 
'{}'".format(e, url, filename)); return downloaded[string] = downloaded[string] + 1 def cleanse_title(string): """ Cleanse title and translate anidb '`' """ DeleteChars = "" ReplaceChars = maketrans("`:/*?-.,;_", " ") #~ if len(string)<=len(String.StripDiacritics(string))+2: string = String.StripDiacritics(string) #else there is jap characters scrubebd outs try: string2 = string.encode('ascii', 'replace') # Encode into Ascii, prevent: UnicodeDecodeError: 'utf8' codec can't decode bytes in position 13-14: invalid continuation byte except: pass else: if not string2.count('?'): string=string2 while re.search(r'\([^\(\)]*?\)', string): string = re.sub(r'\([^\(\)]*?\)', ' ', string) while re.search(r'\[[^\[\]]*?\]', string): string = re.sub(r'\[[^\[\]]*?\]', ' ', string) # string = "qwerty [asdf] zxcv [vbnm] ghjk [tyui]" > 'qwerty zxcv ghjk ', string = "qwerty [asdf zxcv [vbnm] ghjk tyui]" > 'qwerty ' return " ".join(str(unicodedata.normalize('NFC', unicode(string.lower()))).translate(ReplaceChars, DeleteChars).split()) # str needed for translate def write_logs(media, movie, error_log, source, AniDBid, TVDBid): """ HAMA - Load logs, add non-present entried then Write log files to Plug-in /Support/Data/com.plexapp.agents.hama/DataItems """ Log.Info("=== common.write_logs() ===".ljust(157, '=')) if source == 'anidb': source = 'AniDBid' elif source == 'tvdb': source = 'TVDBid' library = GetLibraryRootPath(GetMediaDir(media, movie))[0] for char in list("\\/:*?<>|~;"): if char in library: library = library.replace(char, '-') ### File lock ### sleep_time_max = 10 for log in error_log: sleep_time = 0 while log in netLocked and netLocked[log][0]: time.sleep(1) sleep_time += 1 if sleep_time > sleep_time_max: Log.Error("Could not obtain the lock in {}sec & lock age is {}sec. Skipping log update.".format(sleep_time_max, int(time.time())-netLocked[1] if 1 in netLocked else "never")) continue #break #netLock.acquire() netLocked[log] = (True, int(time.time())) ### Load previous entries ### Log.Info("{log:<{width}}: {content}".format(log=log, width=max(map(len, error_log)), content=str(error_log[log]))) error_log_array = {} log_line_separator = "<br />\r\n" error_log_file = os.path.join('_Logs', library+' - '+log+'.htm') if Data.Exists(error_log_file): for line in Data.Load(error_log_file).split(log_line_separator): if "|" in line: error_log_array[line.split("|", 1)[0].strip()] = line.split("|", 1)[1].strip() ### Remove this serie entry ### if not log in ["Missing Episodes", "Missing Specials"]: keys = ["AniDBid: "+AniDBid, "AniDBid: "+WEB_LINK % (ANIDB_SERIE_URL + AniDBid, AniDBid), "TVDBid: "+ TVDBid, "TVDBid: "+WEB_LINK % (TVDB_SERIE_URL + TVDBid, TVDBid)] elif not movie and (len(media.seasons)>2 or max(map(int, media.seasons.keys()))>1): keys = ["TVDBid: %s" % (WEB_LINK % (TVDB_SERIE_URL + TVDBid, TVDBid) )] else: keys = ["%s: %s" % (source, WEB_LINK % (ANIDB_SERIE_URL + AniDBid if source == "AniDBid" else TVDB_SERIE_URL + TVDBid, AniDBid if source == "AniDBid" else TVDBid) )] deleted = [] for key in keys: if key in error_log_array: deleted.append(error_log_array[key]) del(error_log_array[key]) # remove entry, needs updating or removal... if not deleted and not error_log[log]: netLocked[log] = (False, 0); continue # didn't delete anything, no entry to add, the only case when we skip ### Generate prefix, append to error_log_array and Save error_log_array ### log_prefix = '' if log == 'TVDB posters missing': log_prefix = "Series posters must be 680x1000 and be JPG format. 
They should not contain spoilers, nudity, or vulgarity. Please ensure they are of high quality with no watermarks, unrelated logos, and that they don't appear stretched." + log_line_separator if log == 'Plex themes missing': log_prefix = WEB_LINK % ("https://plexapp.zendesk.com/hc/en-us/articles/201572843","Restrictions") + log_line_separator for entry in error_log[log]: error_log_array[entry.split("|", 1)[0].strip()] = entry.split("|", 1)[1].strip() if len(entry.split("|", 1))>=2 else "" try: Data.Save(error_log_file, log_prefix + log_line_separator.join(sorted([str(key)+" | "+str(error_log_array[key]) for key in error_log_array], key = lambda x: x.split("|",1)[1] if x.split("|",1)[1].strip().startswith("Title:") and not x.split("|",1)[1].strip().startswith("Title: ''") else int(re.sub(r"<[^<>]*>", "", x.split("|",1)[0]).strip().split()[1].strip("'")) ))) except Exception as e: Log.Error("Exception: '%s'" % e) netLocked[log] = (False, 0) def Other_Tags(media, movie, status): # Other_Tags(media, Dict(AniDB_dict, 'status') or Dict(TheTVDB_dict, 'status')) """ Add genre tags: Status, Extension, Dubbed/Subbed """ tags = [] if movie: file = media.items[0].parts[0].file else: s = media.seasons.keys()[0] if media.seasons.keys()[0]!='0' else media.seasons.keys()[1] if len(media.seasons.keys()) >1 else None if s: e = media.seasons[s].episodes.keys()[0] file = media.seasons[s].episodes[e].items[0].parts[0] else: file = '' ### Status tag: #"Ended" or "Continuing", "" from:AniDB, TVDB ### if status in ('Ended', 'Continuing'): tags.append(status) if file: ### Extension tag ### tags.append(str(os.path.splitext(file.file)[1].lstrip('.'))) # avoid u'ext' ### Tag Dubbed/Subbed ###yyy streams = {1:[], 2:[], 3:[]} #StreamTypes = {1: 'video', 2: 'audio', 3: 'subtitle'} for stream in file.streams: if stream.type in streams: streams[stream.type].append(stream.language if hasattr(stream, 'language') else "") for audio in streams[2]: if not streams[3]: tags.extend([audio + " Dubbed" for audio in streams[2]]) else: tags.extend([audio + " Subbed " + subtitle for audio in streams[2] for subtitle in streams[3]]) return tags ### Update meta field ### def UpdateMetaField(metadata_root, metadata, meta_root, fieldList, field, source, movie, source_list): if field not in meta_root: Log.Info('[!] field: "{}" not in meta_root, source: "{}"'.format(field, source)); return if type(metadata).__name__=="tuple": ep_string = ' new season: {:<2}, new_episode: {:<3}'.format(metadata[3], metadata[4]) metadata = metadata[0].seasons[metadata[1]].episodes[metadata[2]] is_episode = True else: ep_string, is_episode = "", False meta_old = getattr(metadata, field) # getattr( metadata, field, None) meta_new = meta_root[field] meta_new_short = (meta_new[:80]).replace("\n", "\\n").replace("\r", "\\r")+'..' 
if isinstance(meta_new, basestring) and len(meta_new)> 80 else meta_new MetaFieldList = ('directors', 'writers', 'producers', 'guest_stars', 'collections', 'genres', 'tags', 'countries') MetaRoleList = ('directors', 'writers', 'producers', 'guest_stars', 'roles') MetaIntList = ('year', 'absolute_number', 'duration') ### Prepare data for comparison ### try: if isinstance(meta_new, int): if field == 'rating': meta_new = float(meta_new) if isinstance(meta_new, basestring) or isinstance(meta_new, str): if field == 'rating': meta_new = float(meta_new) if field == 'title_sort': meta_new = SortTitle(meta_new) if field == 'originally_available_at': meta_new = Datetime.ParseDate(meta_new).date() if field in MetaIntList: meta_new = int(meta_new) if meta_new.isdigit() else None if field in MetaFieldList: meta_new = re.sub(r'\([^)]*\)', '', meta_new) meta_new = meta_new.split(',' if ',' in meta_new else '|') if isinstance(meta_new, list) and field in MetaRoleList: meta_new = [{'role': Dict(obj, 'role'), 'name': Dict(obj, 'name'), 'photo': Dict(obj,'photo')} if isinstance(obj, dict) else \ {'role': None, 'name': obj, 'photo': None} for obj in meta_new] except Exception as e: Log.Info("[!] 1{field:<23} Sources: {sources:<60} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e)) try: if not isinstance(meta_new, list): meta_old_value = meta_old elif field in MetaRoleList: meta_old_value = [ {'role': role_obj.role, 'name': role_obj.name, 'photo': role_obj.photo} for role_obj in meta_old] #if role_obj.role] else: meta_old_value = [x for x in meta_old] #meta_old_value = [ {'role': role_obj.role, 'name': role_obj.name, 'photo': role_obj.photo} for role_obj in meta_old] except Exception as e: Log.Info("[!] 2{field:<23} Sources: {sources:<11} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e)) ### Update ONLY IF REQUIRED ### if '|' in Prefs[field]: if metadata_root==metadata: sources = '|'.join([Prefs[field].split('|')[is_episode].replace(source, '('+source+')'), Prefs[field].split('|')[1]]) else: sources = '|'.join([Prefs[field].split('|')[is_episode], Prefs[field].split('|')[1].replace(source, '('+source+')')]) else: sources = Prefs[field].replace(source, '('+source+')') if isinstance(meta_new, dict) and field=='posters': Log.Info('[?] 
meta_new: {}\n meta_old: {}'.format(DictString(meta_new, 1, 4), DictString(sorted(meta_old.keys(), key=natural_sort_key), 1, 4))) # Can't print meta_old values as plex custom class without a string print call if meta_new == meta_old_value or field not in MetaRoleList and (isinstance(meta_new, dict) and set(meta_new.keys()).issubset(meta_old.keys()) or isinstance(meta_new, list) and set(meta_new)== set(meta_old)): Log.Info("[=] {field:<23} {len:>4} Sources: {sources:<60} Inside: '{source_list}' Value: '{value}'".format(field=field, len="({:>2})".format(len(meta_root[field])) if isinstance(meta_root[field], (list, dict)) else "", sources=sources, value=meta_new_short, source_list=source_list)) else: Log.Info("[x] {field:<23} {len:>4} Sources: {sources:<60} Inside: '{source_list}' Value: '{value}'".format(field=field, len="({:>2})".format(len(meta_root[field])) if isinstance(meta_root[field], (list, dict)) else "", sources=sources, value=meta_new_short, source_list=source_list)) if isinstance(meta_new, dict) and field in ['posters', 'banners', 'art', 'themes', 'thumbs']: for url in meta_new: if not url in meta_old and isinstance(meta_new[url], tuple): metadata_download(metadata_root, meta_old, url, meta_new[url][0], meta_new[url][1], meta_new[url][2]) elif isinstance(meta_new, list) and field in MetaRoleList: try: meta_old.clear() for item in meta_new: meta_role = meta_old.new() if not isinstance(item, dict): setattr(meta_role, 'name', item) #list of names instead of list of people, but should already be list of people else: for field in item: if item[field]: setattr(meta_role, field, item[field]) except Exception as e: Log.Info("[!] {field:<29} Sources: {sources:<60} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e)) else: try: setattr(metadata, field, meta_new) #Type: {format:<20} #format=type(meta_old).__name__+"/"+type(meta_new).__name__, except Exception as e: Log.Info("[!] {field:<29} Sources: {sources:<60} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e)) def UpdateMeta(metadata, media, movie, MetaSources, mappingList): """ Update all metadata from a list of Dict according to set priorities """ Log.Info("=== common.UpdateMeta() ===".ljust(157, '=')) # Display source field table Log.Info("Fields in Metadata Sources per movie/serie, season, episodes") for source in MetaSources: if MetaSources[source]: Log.Info("- {source:<11} : {fields}".format(source=source, fields=' | '.join('{}{:<23} ({:>3})'.format('\n ' if i%5==0 and i>0 else '', field, len(MetaSources[source][field]) if isinstance(MetaSources[source][field], (list, dict)) else 1) for i, field in enumerate(MetaSources[source])))) if type(MetaSources[source]).__name__ == 'NoneType': Log.Info("[!] 
source: '%s', type: '%s', bad return in function, should return an empty dict" % (source, type(MetaSources[source]).__name__)) if 'seasons' in (MetaSources[source] if MetaSources[source] else {}) : season_fields, episode_fields, ep_nb, ep_invalid = {}, {}, 0, 0 for season in sorted(MetaSources[source]['seasons'], key=natural_sort_key): for field in MetaSources[source]['seasons'][season]: if field in FieldListSeasons: season_fields[field] = (season_fields[field] + 1) if field in season_fields else 1 elif not field=="episodes" and not field.isdigit(): Log.Info("Season Field Unrecognised: '{}' in source: '{}'".format(field, source)) for episode in sorted(MetaSources[source]['seasons'][season]['episodes'], key=natural_sort_key) if 'episodes' in MetaSources[source]['seasons'][season] else []: for field in MetaSources[source]['seasons'][season]['episodes'][episode]: if field in FieldListEpisodes: episode_fields[field] = episode_fields[field] + 1 if field in episode_fields else 1 elif field is not 'language_rank': Log.Info(" {:<23} Season {:>3}, Episode: {:>3} is not a valid metadata field, value: '{!s}'".format(field, season, episode, MetaSources[source]['seasons'][season]['episodes'][episode][field])); ep_invalid+=1 ep_nb+=1 if len(season_fields ): Log.Info(" - Seasons ({nb:>3}): {fields}".format(nb=len(MetaSources[source]['seasons']), fields=' | '.join('{}{:<23} ({:>3})'.format('\n ' if i%5==0 and i>0 else '',field, season_fields[field]) for i, field in enumerate(season_fields)))) if len(episode_fields): Log.Info(" - Episodes ({nb:>3}): {fields}".format(nb=ep_nb-ep_invalid , fields=' | '.join('{}{:<23} ({:>3})'.format('\n ' if i%5==0 and i>0 else '',field, episode_fields[field]) for i, field in enumerate(episode_fields)))) Log.Info("".ljust(157, '-')) #if AniDB_dict['originally_available_at']: AniDB_dict['year'] = AniDB_dict['originally_available_at'].year ### Metadata review display. Legend for the '[ ]' display: # [=] already at the right value for that source # [x] Xst/nd/th source had the field # [#] no source for that field # [!] Error assigning #Update engine Log.Info("Metadata Fields (items #), type, source provider, value") count = {'posters':0, 'art':0, 'thumbs':0, 'banners':0, 'themes':0} languages = Prefs['EpisodeLanguagePriority'].replace(' ', '').split(',') #posters=[] #fields = metadata.attrs.keys() #if 'seasons' in fields: fields.remove('seasons') for field in FieldListMovies if movie else FieldListSeries: meta_old = getattr(metadata, field) if field in ('posters', 'banners', 'art'): meta_old.validate_keys([]) #This will allow the images to get readded at the correct priority level if preferences are updates and meta is refreshed source_list = [ source_ for source_ in MetaSources if Dict(MetaSources, source_, field) ] language_rank, language_source = len(languages)+1, None for source in [source.strip() for source in (Prefs[field].split('|')[0] if '|' in Prefs[field] else Prefs[field]).split(',') if Prefs[field]]: if source in MetaSources: #For AniDB assigned series will favor AniDB summary even if TheTVDB is before in the source order for summary fields IF the anidb series is not mapped to TheTVDB season 1. 
if Dict(MetaSources, source, field): if field=='genres'and ('|' in MetaSources[source]['genres'] or ',' in MetaSources[source]['genres']): MetaSources[source]['genres'] = MetaSources[source]['genres'].split('|' if '|' in MetaSources[source]['genres'] else ',') MetaSources[source]['genres'].extend( Other_Tags(media, movie, Dict(MetaSources, 'AniDB', 'status')) ) if field=='title': title, rank = Dict(MetaSources, source, 'title'), Dict(MetaSources, source, 'language_rank') if rank in (None, ''): rank = len(languages) if rank<language_rank: MetaSources[source]['title_sort'], language_rank, language_source = SortTitle(title, IsIndex(languages, rank)), rank, source else: UpdateMetaField(metadata, metadata, MetaSources[source], FieldListMovies if movie else FieldListSeries, field, source, movie, source_list) if field in count: count[field] = count[field] + 1 if field!='title' and (field not in ['posters', 'art', 'banners', 'themes', 'thumbs', 'title']): break elif not source=="None": Log.Info("[!] '{}' source not in MetaSources dict, please Check case and spelling".format(source)) else: if field=='title': UpdateMetaField(metadata, metadata, Dict(MetaSources, language_source, default={}), FieldListMovies if movie else FieldListSeries, 'title', language_source, movie, source_list) #titles have multiple assignments, adding only once otherwise duplicated field outputs in logs elif not Dict(count, field) and Prefs[field]!="None" and source_list: Log.Info("[#] {field:<29} Sources: {sources:<60} Inside: {source_list} Values: {values}".format(field=field, sources='' if field=='season' else Prefs[field], source_list=source_list, values=Dict(MetaSources, source, field))) #if field=='posters': metadata.thumbs.validate_keys(meta_new.keys()) if not movie: ### AniDB poster as season poster backup ### #if (metadata.id.startswith("tvdb") or max(map(int, media.seasons.keys())) >1) and Dict(mappingList, 'defaulttvdbseason'): # defaulttvdb season isdigit and assigned to 1 tvdb season (even if it is season 0) # if Dict(MetaSources, 'AniDB', 'posters'): SaveDict(MetaSources['AniDB']['posters'], MetaSources, 'AniDB', 'seasons', Dict(mappingList, 'defaulttvdbseason') if Dict(mappingList, 'defaulttvdbseason').isdigit() else '1', 'posters') # if Dict(MetaSources, 'AniDB', 'summary'): SaveDict(MetaSources['AniDB']['summary'], MetaSources, 'AniDB', 'seasons', Dict(mappingList, 'defaulttvdbseason') if Dict(mappingList, 'defaulttvdbseason').isdigit() else '1', 'summary') ### Seasons ### #languages = Prefs['SerieLanguagePriority'].replace(' ', '').split(',') #count = {'posters':0, 'art':0} count = {'posters':0, 'art':0, 'thumbs':0, 'banners':0, 'themes':0} #@task #def UpdateEpisodes(metadata=metadata, MetaSources=MetaSources, count=count, season=season, episode=episode, cached_logs=cached_logs): cached_logs = {} #@parallelize #def addMeta(): season_posters_list = [] for season in sorted(media.seasons, key=natural_sort_key): # For each season, media, then use metadata['season'][season]... 
Log.Info(("metadata.seasons[{:>2}]".format(season)).ljust(157, '-')) source_list = [ source_ for source_ in MetaSources if Dict(MetaSources, source_, 'seasons', season, field) ] new_season = season for field in FieldListSeasons: #metadata.seasons[season].attrs.keys() meta_old = getattr(metadata.seasons[season], field) if field in ('posters', 'banners', 'art'): meta_old.validate_keys([]) #This will allow the images to get readded at the correct priority level if preferences are updates and meta is refreshed for source in [source.strip() for source in Prefs[field].split(',') if Prefs[field]]: if source in MetaSources: if Dict(MetaSources, source, 'seasons', season, field) or metadata.id.startswith('tvdb4'): if field=='posters': season_posters_list.extend(Dict(MetaSources, source, 'seasons', season, 'posters', default={}).keys()) UpdateMetaField(metadata, metadata.seasons[season], Dict(MetaSources, source, 'seasons', season), FieldListSeasons, field, source, movie, source_list) if field in count: count[field] = count[field] + 1 if field not in ['posters', 'art']: break elif not source=="None": Log.Info("[!] {} Sources: '{}' not in MetaSources".format(field, source)) else: if not Dict(count, field) and Dict(Prefs, field)!="None" and source_list: Log.Info("[#] {field:<29} Sources: {sources:<60} Inside: {source_list}".format(field=field, sources='' if field=='seasons' else Prefs[field], source_list=source_list)) ### Episodes ### languages = Prefs['EpisodeLanguagePriority'].replace(' ', '').split(',') for episode in sorted(media.seasons[season].episodes, key=natural_sort_key): Log.Info("metadata.seasons[{:>2}].episodes[{:>3}]".format(season, episode)) new_season, new_episode = season, episode source_title, title, rank = '', '', len(languages)+1 for field in FieldListEpisodes: # metadata.seasons[season].episodes[episode].attrs.keys() meta_old = getattr(metadata.seasons[season].episodes[episode], field) source_list = [ source_ for source_ in MetaSources if Dict(MetaSources, source_, 'seasons', new_season, 'episodes', new_episode, field) ] for source in [source_.strip() for source_ in (Prefs[field].split('|')[1] if '|' in Prefs[field] else Prefs[field]).split(',')]: #if shared by title and eps take later priority if source in MetaSources: if Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, field): if field=='title': language_rank = Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, 'language_rank') if language_rank not in ('', None) and language_rank < rank or len(languages)< rank: #Manage title language for AniDB and TheTVDB by recording the rank source_title = source title = Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, 'title' ) rank = Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, 'language_rank') Log.Info('[?] rank: {:>1}, source_title: {:>7}, title: "{}"'.format(rank, source_title, title)) else: Log.Info('[!] title: {}, language_rank {}, rank: {}, len(languages): "{}"'.format(title, language_rank, rank, len(languages))) else: UpdateMetaField(metadata, (metadata, season, episode, new_season, new_episode), Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode), FieldListEpisodes, field, source, movie, source_list) if field in count: count[field] = count[field] + 1 if field!='title' and (field not in ['posters', 'art', 'banners', 'themes', 'thumbs', 'title']): break elif not source=="None": Log.Info("[!] 
'{}' source not in MetaSources dict, please Check case and spelling".format(source)) else: if field=='title' and source_title: UpdateMetaField(metadata, (metadata, season, episode, new_season, new_episode), Dict(MetaSources, source_title, 'seasons', new_season, 'episodes', new_episode), FieldListEpisodes, field, source_title, movie, source_list) elif not Dict(count, field) and field!='seasons' and Prefs[field]!="None" and source_list: Log.Info("[#] {field:<29} Sources: {sources:<60} Inside: {source_list}".format(field=field, sources='' if field=='seasons' else Prefs[field], source_list=source_list)) if field=='thumbs': metadata.seasons[season].episodes[episode].thumbs.validate_keys(meta_new.keys()) # End Of for field # End Of for episode else: metadata.seasons[season].posters.validate_keys(season_posters_list) # End of for season Log.Info("".ljust(157, '-')) global downloaded; downloaded = {'posters':0, 'art':0, 'seasons':0, 'banners':0, 'themes':0, 'thumbs': 0} def SortTitle(title, language="en"): """ SortTitle """ dict_sort = { 'en': ["The", "A", "An"], 'fr': ["Le", "La", "Les", "L", "Un", "Une ", "Des "], 'sp': ["El", "La", "Las", "Lo", "Los", "Uno ", "Una "] } title = title.replace("'", " ") prefix = title.split (" ", 1)[0] #Log.Info("SortTitle - title:{}, language:{}, prefix:{}".format(title, language, prefix)) return title.replace(prefix+" ", "", 1) if language in dict_sort and prefix in dict_sort[language] else title def poster_rank(source, image_type, language='en', rank_adjustment=0): """ { "id": "PosterLanguagePriority", "label": "TheTVDB Poster Language Priority", "type": "text", "default": ... }, { "id": "posters", "label": "TS-M 'poster'", "type": "text", "default": ... }, { "id": "art", "label": "T--M 'art'", "type": "text", "default": ... }, { "id": "banners", "label": "TS-- 'banners'", "type": "text", "default": ... }, """ max_rank = 100 if image_type == 'seasons': image_type = 'posters' language_posters = [language.strip() for language in Prefs['PosterLanguagePriority'].split(',')] priority_posters = [provider.strip() for provider in Prefs[image_type ].split(',')] lp_len = len(language_posters) pp_len = len(priority_posters) lp_pos = language_posters.index(language) if language in language_posters else lp_len pp_pos = priority_posters.index(source) if source in priority_posters else pp_len lp_block_size = max_rank/lp_len pp_block_size = lp_block_size/pp_len rank = (lp_pos*lp_block_size)+(pp_pos*pp_block_size)+1+rank_adjustment if rank > 100: rank = 100 if rank < 1: rank = 1 #Log.Info(" - language: {:<10}, lp_pos: {}, lp_block_size: {}, language_posters: {}".format(language, lp_pos, lp_block_size, language_posters)) #Log.Info(" - source: {:<10}, pp_pos: {}, pp_block_size: {}, priority_posters: {}".format(source, pp_pos, pp_block_size, priority_posters)) #Log.Info(" - image_type: {}, rank: {}".format(image_type, rank)) return rank
gpl-3.0
8,189,609,514,421,203,000
68.007491
382
0.627191
false
flynx/pli
pli/pattern/proxy/_callproxy.py
1
9071
#======================================================================= #======================================================================= __version__ = '''0.1.00''' __sub_version__ = '''20040223152229''' __copyright__ = '''(c) Alex A. Naanou 2003''' #----------------------------------------------------------------------- ##import sys ##import new ##import types ##import weakref import operator from pli.functional import * #----------------------------------------------------------------------- # # WARNING: this is not yet complete!!! # # NOTE: this works about twice faster than the *clever* version. ##!! BUG: this apears not to work with the % operator (e.g. '%d' % pobj) class callproxy(object): ''' this is a dumb callproxy. ''' __slots__ = ['p_obj', 'p_queue', 'p_cache', 'p_drop_refs', 'p_callback', 'p_safe', '__weakref__'] def __init__(self, obj, queue=None, cache=None, drop_refs=0, callback=None, safe=1): # do some correctness checks if safe: # callback test if callback != None: if not callable(callback): raise TypeError, 'callback object must be callable.' elif queue == None: raise TypeError, 'one of either callback or queue objects must be specified.' # test if queue supports append elif not hasattr(queue, 'append'): raise TypeError, 'queue object must have an "append" method.' # test if this supports dict interface if cache != None and (not hasattr(cache, '__setitem__') or not hasattr(cache, '__getitem__') or not hasattr(cache, 'keys')): raise TypeError, 'cache object must support "__setitem__", "__getitem__" and "keys" methods' ## # if this is true there is no point in this in the first place! ## elif callback == None and queue == None: ## raise TypeError, 'one of either callback or queue objects must be specified.' osetattr = object.__setattr__ osetattr(self, 'p_obj', obj) osetattr(self, 'p_queue', queue) osetattr(self, 'p_cache', cache) osetattr(self, 'p_drop_refs', drop_refs) osetattr(self, 'p_callback', callback) osetattr(self, 'p_safe', safe) def __getattr__(self, name): target = getattr(self.p_obj, name) if self.p_cache != None and hasattr(self.p_cache, 'update'): if target in self.p_cache.keys(): return self.p_cache[target] else: pobj = callproxy(target, self.p_queue, self.p_cache, self.p_drop_refs, self.p_callback, self.p_safe) self.p_cache.update({target: pobj}) return pobj return self.__class__(target, self.p_queue, self.p_cache, self.p_drop_refs, self.p_callback, self.p_safe) def __call__(self, *p, **n): # check if callable... if not callable(self.p_obj): self.p_obj(*p, **n) if self.p_queue != None: # queue the call self.p_queue.append(curry(self.p_obj, *p, **n)) # do the callback. if self.p_callback != None: return self.p_callback(*(self.p_obj,) + p, **n) return None elif self.p_callback != None: return self.p_callback(*(self.p_obj,) + p, **n) # we get here if safe is False... # WARNING: this is currently incompatible with the python version! 
return self.p_obj(*p, **n) def __setattr__(self, name, val): setattr(self.p_obj, name, val) def __delattr__(self, name): delattr(self.p_obj, name) def __repr__(self): return '<callproxy at %s to %.100s at %s>' % (hex(id(self)), self.p_obj.__class__.__name__, hex(id(self.p_obj))) def __str__(self): return str(self.p_obj) def __iter__(self): return self.p_obj.__iter__() def __hash__(self): return hash(self.p_obj) def __nonzero__(self): if hasattr(self.p_obj, '__nonzero__'): return self.p_obj.__nonzero__() elif hasattr(self.p_obj, '__len__'): return len(self.p_obj) else: return 1 def __len__(self): return len(self.p_obj) def __unicode__(self): return self.p_obj.__unicode__() def __cmp__(self, other): # get the original type if the other side is callproxy if isinstance(other, callproxy): other = other.p_obj return cmp(self.p_obj, other) ## def __lt__(self, other): ## return self.p_obj.__lt__(other) ## def __le__(self, other): ## return self.p_obj.__le__(other) ## def __eq__(self, other): ## return self.p_obj.__eq__(other) ## def __ne__(self, other): ## return self.p_obj.__ne__(other) ## def __gt__(self, other): ## return self.p_obj.__gt__(other) ## def __ge__(self, other): ## return self.p_obj.__ge__(other) # number interface... # NOTE: if you have the strength to read this section, be my guest! # (I did not even have the strength to write it.. :) ) def __add__(x, y): if isinstance(x, callproxy): x = (x).p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__add__(x, y) def __sub__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__sub__(x, y) def __mul__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__mul__(x, y) def __floordiv__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__floordiv__(x, y) def __truediv__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__truediv__(x, y) def __div__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__div__(x, y) def __mod__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__mod__(x, y) def __divmod__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__divmod__(y) def __pow__(x, y, z): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj if isinstance(z, callproxy): z = z.p_obj return x.__pow__(y, z) def __neg__(self): return operator.__neg__(self.p_obj) def __pos__(self): return operator.__pos__(self.p_obj) def __abs__(self): return operator.__abs__(self.p_obj) def __invert__(self): return operator.__invert__(self.p_obj) def __lshift__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__lshift__(x, y) def __rshift__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__rshift__(x, y) def __and__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__and__(x, y) def __xor__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__xor__(x, y) def __or__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return operator.__or__(x, y) def __int__(self): return int(self.p_obj) def __long__(self): 
return long(self.p_obj) def __float__(self): return float(self.p_obj) def __oct__(self): return oct(self.p_obj) def __hex__(self): return hex(self.p_obj) def __iadd__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__iadd__(y) def __isub__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__isub__(y) def __imul__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__imul__(y) def __ifloordiv__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__ifloordiv__(y) def __itruediv__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__itruediv__(y) def __idiv__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__idiv__(y) def __imod__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__imod__(y) def __ipow__(x, y, z): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj if isinstance(z, callproxy): z = z.p_obj return x.__ipow__(y, z) def __ilshift__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__ilshift__(y) def __irshift__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__irshift__(y) def __iand__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__iand__(y) def __ixor__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__ixor__(y) def __ior__(x, y): if isinstance(x, callproxy): x = x.p_obj if isinstance(y, callproxy): y = y.p_obj return x.__ior__(y) ##!!! #======================================================================= # vim:set ts=4 sw=4 nowrap :
bsd-3-clause
6,048,693,589,287,850,000
27.615142
127
0.578216
false
djtotten/workbench
setup.py
1
1964
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os, sys
from setuptools import setup, find_packages

readme = open('README.rst').read()
long_description = readme
doclink = '''
Documentation
-------------

The full documentation is at http://workbench.rtfd.org.
'''
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
exec(open('workbench/server/version.py').read())

setup(
    name='workbench',
    version=__version__,
    description='A scalable framework for security research and development teams.',
    long_description=readme + '\n\n' + doclink + '\n\n' + history,
    author='The Workbench Team',
    author_email='[email protected]',
    url='http://github.com/SuperCowPowers/workbench',
    packages=['workbench', 'workbench.server', 'workbench.server.bro',
              'workbench.workers', 'workbench.workers.rekall_adapter',
              'workbench.clients', 'workbench_apps', 'workbench_apps.workbench_cli'],
    package_dir={'workbench': 'workbench', 'workbench_apps': 'workbench_apps'},
    include_package_data=True,
    scripts=['workbench/server/workbench_server', 'workbench_apps/workbench_cli/workbench'],
    tests_require=['tox'],
    install_requires=['cython', 'distorm3', 'elasticsearch', 'funcsigs', 'flask',
                      'filemagic', 'ipython', 'lz4', 'mock', 'numpy', 'pandas',
                      'pefile', 'py2neo==1.6.4', 'pymongo', 'pytest', 'rekall==1.0.3',
                      'requests', 'ssdeep==2.9-0.3', 'urllib3', 'yara', 'zerorpc', 'cython'],
    license='MIT',
    zip_safe=False,
    keywords='workbench security python',
    classifiers=[
        'Topic :: Security',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7'
    ]
)
mit
-9,168,152,055,158,455,000
37.509804
95
0.611507
false
heeraj123/oh-mainline
mysite/missions/svn/views.py
1
7651
# This file is part of OpenHatch. # Copyright (C) 2010 Jack Grigg # Copyright (C) 2010 John Stumpo # Copyright (C) 2011 Krzysztof Tarnowski ([email protected]) # Copyright (C) 2010, 2011 OpenHatch, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import shutil import tempfile import json from django.shortcuts import render from mysite.missions.base.views import * from mysite.missions.svn import forms, view_helpers # POST handlers # Helper functions for form submissions. These functions are used to validate # input and/or modify the stored user information about missions, such as # recording that a mission was successfully completed. @login_required def resetrepo(request): """ Reset a user's mission repository and mark steps as uncompleted. """ if request.method != 'POST': return HttpResponseNotAllowed(['POST']) view_helpers.SvnRepository(request.user.username).reset() view_helpers.unset_mission_completed(request.user.get_profile(), 'svn_checkout') view_helpers.unset_mission_completed(request.user.get_profile(), 'svn_diff') view_helpers.unset_mission_completed(request.user.get_profile(), 'svn_commit') if 'stay_on_this_page' in request.GET: return HttpResponseRedirect(reverse('svn_main_page')) else: return HttpResponseRedirect(reverse('svn_checkout')) @login_required def diff_submit(request): """ Handle submitting the results of an svn diff to the mission """ data = {} data['svn_diff_form'] = forms.DiffForm(request.user.username) data['svn_diff_error_message'] = '' if request.method == 'POST': temp_svn_directory = tempfile.mkdtemp() form = forms.DiffForm(request.user.username, temp_svn_directory, request.POST) if form.is_valid(): try: form.commit_diff() view_helpers.set_mission_completed(request.user.get_profile(), 'svn_diff') return HttpResponseRedirect(reverse('svn_diff')) finally: shutil.rmtree(temp_svn_directory) shutil.rmtree(temp_svn_directory) data['svn_diff_form'] = form # If we get here, just hack up the request object to pretend it is a GET # so the dispatch system in the class-based view can use the GET handler. request.method = 'GET' return Diff.as_view()(request, extra_context_data=data) @login_required def checkout_submit(request): """ Handle svn checkout mission step form and completion """ data = {} data['svn_checkout_form'] = forms.CheckoutForm(request.user.username) data['svn_checkout_error_message'] = '' if request.method == 'POST': form = forms.CheckoutForm(request.user.username, request.POST) if form.is_valid(): view_helpers.set_mission_completed(request.user.get_profile(), 'svn_checkout') return HttpResponseRedirect(reverse('svn_checkout')) data['svn_checkout_form'] = form # If we get here, just hack up the request object to pretend it is a GET # so the dispatch system in the class-based view can use the GET handler. 
request.method = 'GET' return Checkout.as_view()(request, extra_context_data=data) class SvnBaseView(mysite.missions.base.views.MissionBaseView): """ A base class for a view of an SVN mission step. SVNBaseView is subclassed to provide GET handler classes to help with views of each mission step. """ mission_name = 'Using Subversion' def get_context_data(self, *args, **kwargs): # For now, we use the MissionPageState object to track a few things. # Eventually, the missions base will stop using the PageState object, # and all the work that class does will get merged into # MissionBaseView. data = super(SvnBaseView, self).get_context_data(*args, **kwargs) state = MissionPageState( self.request, passed_data=None, mission_name=self.mission_name) new_data, person = state.get_base_data_dict_and_person() if person: repo = view_helpers.SvnRepository(self.request.user.username) new_data.update({ 'repository_exists': repo.exists(), 'svn_checkout_done': view_helpers.mission_completed(person, 'svn_checkout'), 'svn_diff_done': view_helpers.mission_completed(person, 'svn_diff'), 'svn_commit_done': view_helpers.mission_completed(person, 'svn_commit'), }) if new_data['repository_exists']: new_data.update({ 'checkout_url': repo.public_trunk_url(), 'secret_word_file': forms.CheckoutForm.SECRET_WORD_FILE, 'file_for_svn_diff': forms.DiffForm.FILE_TO_BE_PATCHED, 'new_secret_word': view_helpers.SvnCommitMission.NEW_SECRET_WORD, 'commit_username': self.request.user.username, 'commit_password': repo.get_password()}) data.update(new_data) return data # Normal GET handlers. These are usually pretty short. They are based on # SvnBaseView. class MainPage(SvnBaseView): """ Main start page of the SVN mission """ this_mission_page_short_name = 'Start page' template_name = 'missions/svn/main_page.html' class LongDescription(SvnBaseView): """ Page with detailed information on SVN """ this_mission_page_short_name = 'About Subversion' template_name = 'missions/svn/about_svn.html' class Checkout(SvnBaseView): """ Checkout step of SVN mission """ login_required = True this_mission_page_short_name = 'Checking out' template_name = 'missions/svn/checkout.html' def get_context_data(self, *args, **kwargs): data = super(Checkout, self).get_context_data(*args, **kwargs) if kwargs.has_key('extra_context_data'): data.update(kwargs['extra_context_data']) else: data['svn_checkout_form'] = forms.CheckoutForm() return data class Diff(SvnBaseView): """ Diff step of the SVN mission """ login_required = True this_mission_page_short_name = 'Diffing your changes' mission_step_prerequisite = 'svn_checkout' template_name = 'missions/svn/diff.html' def get_context_data(self, *args, **kwargs): data = super(Diff, self).get_context_data(*args, **kwargs) if kwargs.has_key('extra_context_data'): data.update(kwargs['extra_context_data']) return data class Commit(SvnBaseView): """ Committing changes step of SVN mission""" login_required = True this_mission_page_short_name = 'Committing your changes' mission_step_prerequisite = 'svn_diff' template_name = 'missions/svn/commit.html' @login_required def commit_poll(request): """ Determines if entire mission is completed """ return HttpResponse(json.dumps(view_helpers.mission_completed(request.user.get_profile(), 'svn_commit')))
agpl-3.0
-5,017,947,890,406,898,000
39.696809
109
0.665534
false
saullocastro/structMan
structmanager/sas.py
2
2784
""" Structural Assemblies - SAs (:mod:`structmanager.sas`) ====================================================== .. currentmodule:: structmanager.sas """ class FrameAssembly(object): """Frame Assembly""" def __init__(self, name, args): args = outerflange, web, innerflange self.name = name self.outerflange = outerflange self.web = web self.innerflange = innerflange def __str__(self): return ('FrameAssembly: ' + self.name + '\n-' + str(self.outerflange) + '\n-' + str(self.web) + '\n-' + str(self.innerflange) ) def __repr__(self): return str(self) class FrameShearClipAssembly(object): """Frame Assembly with Shear Clip""" def __init__(self, name, args): shearclipskin, shearclipframe, outerflange, web, innerflange = args self.name = name self.shearclipskin = shearclipskin self.shearclipframe = shearclipframe self.outerflange = outerflange self.web = web self.innerflange = innerflange def __str__(self): return ('FrameShearClipAssembly: ' + self.name + '\n-' + str(self.shearclipskin) + '\n-' + str(self.shearclipframe) + '\n-' + str(self.outerflange) + '\n-' + str(self.web) + '\n-' + str(self.innerflange) ) def __repr__(self): return str(self) class StiffenedPanelAssembly(object): """Stiffened Panel Assembly""" def __init__(self, name, args): panel, fr1, fr2, str1, str2 = args self.name = name self.panel = panel self.fr1 = fr1 self.fr2 = fr2 self.str1 = str1 self.str2 = str2 def __str__(self): return ('Stiffened Panel Assembly: ' + self.name + '\n-' + str(self.panel) + '\n-' + str(self.fr1) + '\n-' + str(self.fr2) + '\n-' + str(self.str1) + '\n-' + str(self.str2)) def __repr__(self): return str(self) class StiffenedPanelCutout(object): """Stiffened Panel Cutout""" def __init__(self, name, args): panelcutout, str1, str2 = args self.name = name self.panelcutout = panelcutout self.str1 = str1 self.str2 = str2 def __str__(self): return ('Stiffened Panel Cutout: ' + self.name + '\n-' + str(self.panelcutout) + '\n-' + str(self.str1) + '\n-' + str(self.str2)) def __repr__(self): return str(self) sa_classes = [ FrameAssembly, FrameShearClipAssembly, StiffenedPanelAssembly, StiffenedPanelCutout, ]
bsd-3-clause
789,567,808,038,623,600
27.408163
75
0.510057
false
revolutionarysystems/merge
merge/views.py
1
12454
import os import zipfile from django.shortcuts import render from django.http import JsonResponse, HttpResponse from .docMerge import mergeDocument from .xml4doc import getData from random import randint from datetime import datetime from django.views.decorators.csrf import csrf_exempt #from .merge_utils import get_local_dir from .resource_utils import get_working_dir, get_local_txt_content,get_local_dir, refresh_files, zip_local_dirs, remote_link from traceback import format_exc from dash.forms import UploadZipForm from .config import remote_library, gdrive_root, local_root def getParamDefault(params, key, default, preserve_plus=False): try: result = params.get(key) if result == None: return default elif result == "": return default else: if preserve_plus: return result else: return result.replace("+"," ") except: return default def merge_raw(request, method="POST"): if method=="GET": params = request.GET else: params = request.POST abs_uri = request.build_absolute_uri() protocol, uri = abs_uri.split("://") site = protocol+"://"+uri.split("/")[0]+"/" id = getParamDefault(params, "identifier", str(randint(0,10000))) flowFolder = getParamDefault(params, "flow_folder", "/"+gdrive_root+"/Flows") flow = getParamDefault(params, "flow", "md") remoteTemplateFolder = getParamDefault(params, "template_folder", "/"+gdrive_root+"/Templates") remoteOutputFolder = getParamDefault(params, "output_folder", "/"+gdrive_root+"/Output") template_subfolder = getParamDefault(params, "template_subfolder", None) output_subfolder = getParamDefault(params, "output_subfolder", None) payload = getParamDefault(params, "payload", None, preserve_plus=True) payload_type = getParamDefault(params, "payload_type", None) test_case = getParamDefault(params, "test_case", None) data_folder = getParamDefault(params, "data_folder", "/"+gdrive_root+"/Test Data") data_file = getParamDefault(params, "data_file", None) data_root = getParamDefault(params, "data_root", None) branding_folder = getParamDefault(params, "branding_folder", "/"+gdrive_root+"/Branding") branding_file = getParamDefault(params, "branding_file", None) xform_folder = getParamDefault(params, "xform_folder", "/"+gdrive_root+"/Transforms") xform_file = getParamDefault(params, "xform_file", None) templateName = getParamDefault(params, "template", "AddParty.md") email = getParamDefault(params, "email", "[email protected]") templateName = templateName.replace("\\", "/") if template_subfolder: template_subfolder = template_subfolder.replace("\\", "/") subs = getData(test_case=test_case, payload=payload, payload_type=payload_type, params = params, local_data_folder="test_data", remote_data_folder = data_folder, data_file=data_file, xform_folder = xform_folder, xform_file=xform_file) if data_root: if data_root in subs: subs = subs[data_root] else: raise ValueError("Invalid data_root: " + data_root) if branding_file: branding_subs = getData(local_data_folder = "branding", remote_data_folder = branding_folder, data_file=branding_file) subs["branding"]= branding_subs subs["AgreementDate"]=datetime.now() subs["docs"]=[templateName] #subs["roles"]=[ # {"called":"Landlord", "values":["PropertyOwner", "AdditionalLandlord"]}, # {"called":"Tenant", "values":["ManuallyInvitedTenant", "AdditionalTenant"]}, # {"called":"Guarantor", "values":["Guarantor"]}, #] subs["site"]= site # return mergeDocument(flowFolder, flow, remoteTemplateFolder, templateName, id, subs, remoteOutputFolder, email=email, payload=payload) return mergeDocument(flowFolder, flow, remoteTemplateFolder, 
template_subfolder, templateName, id, subs, remoteOutputFolder, output_subfolder, email=email, payload=payload) def push_raw(request, method="POST"): if method=="GET": params = request.GET else: params = request.POST abs_uri = request.build_absolute_uri() protocol, uri = abs_uri.split("://") site = protocol+"://"+uri.split("/")[0]+"/" id = getParamDefault(params, "identifier", str(randint(0,10000))) flowFolder = getParamDefault(params, "flow_folder", "/"+gdrive_root+"/Flows") flow = getParamDefault(params, "flow", "md") remoteTemplateFolder = getParamDefault(params, "template_folder", "/"+gdrive_root+"/Templates") remoteOutputFolder = getParamDefault(params, "output_folder", "/"+gdrive_root+"/Output") payload = getParamDefault(params, "payload", None) templateName = getParamDefault(params, "template", "AddParty.md") template_subfolder = getParamDefault(params, "template_subfolder", None) output_subfolder = getParamDefault(params, "output_subfolder", None) email = getParamDefault(params, "email", "[email protected]") templateName = templateName.replace("\\", "/") if template_subfolder: template_subfolder = template_subfolder.replace("\\", "/") sep = templateName.rfind("/") if sep >=0: path = templateName[:sep] templateName = templateName[sep+1:] if template_subfolder == None: template_subfolder = path else: template_subfolder+="/"+path subs={} subs["site"]= site return mergeDocument(flowFolder, flow, remoteTemplateFolder, template_subfolder, templateName, id, subs, remoteOutputFolder, output_subfolder, email=email, payload=payload, require_template=False) def error_response(ex): overall_outcome = {} overall_outcome["success"]=False overall_outcome["messages"]=[{"level":"error", "message": str(ex)}] overall_outcome["steps"]=[] overall_outcome["traceback"]=format_exc(8) return overall_outcome def disallowed_response(reason): overall_outcome = {} overall_outcome["success"]=False overall_outcome["messages"]=[{"level":"error", "message": reason}] overall_outcome["steps"]=[] return overall_outcome def merge_raw_wrapped(request, method="POST"): try: return merge_raw(request, method=method) except Exception as ex: return error_response(ex) @csrf_exempt def merge(request): return JsonResponse(merge_raw_wrapped(request)) def push_raw_wrapped(request, method="POST"): try: return push_raw(request, method=method) except Exception as ex: return error_response(ex) @csrf_exempt def push(request): return JsonResponse(push_raw_wrapped(request)) def merge_get(request): return JsonResponse(merge_raw_wrapped(request, method="GET")) def file_raw(request): params = request.GET filename = getParamDefault(params, "name", None) download = getParamDefault(params, "download", "false") subfolder = getParamDefault(params, "path", "output") filepath = get_local_dir(subfolder) #file_content="" #with open(filepath+filename) as file: # for line in file: # file_content+=(line+"\n") if filename.find(".pdf")>=0: file = open(filepath+"/"+filename, 'rb') response = HttpResponse(file, content_type='application/pdf') if download =="true": response['Content-Disposition'] = "attachment; filename={}".format(filename) else: response['Content-Disposition'] = "inline; filename={}".format(filename) return response elif filename.find(".zip")>=0: file = open(filepath+"/"+filename, 'rb') response = HttpResponse(file, content_type='application/zip') response['Content-Disposition'] = "attachment; filename={}".format(filename) return response elif filename.find(".docx")>=0: file = open(filepath+"/"+filename, 'rb') response = HttpResponse(file, 
content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document') response['Content-Disposition'] = "attachment; filename={}".format(filename) return response else: cwd = get_working_dir() return HttpResponse(get_local_txt_content(cwd, subfolder, filename)) def file(request): return file_raw(request) def file_link(request): params = request.GET filename = getParamDefault(params, "name", None) subfolder = getParamDefault(params, "path", "output") response = {"remote":remote_link(filename, subfolder)} return JsonResponse(response) def refresh(request): if remote_library: try: params = request.GET local = getParamDefault(params, "local", "templates") if local.find("templates")==0: remote_default = local.replace(local.split("/")[0],"/"+gdrive_root+"/Templates") elif local.find("flows")==0: remote_default = local.replace(local.split("/")[0],"/"+gdrive_root+"/Flows") elif local.find("branding")==0: remote_default = local.replace(local.split("/")[0],"/"+gdrive_root+"/Branding") elif local.find("test_data")==0: remote_default = local.replace(local.split("/")[0],"/"+gdrive_root+"/Test Data") elif local.find("transforms")==0: remote_default = local.replace(local.split("/")[0],"/"+gdrive_root+"/Transforms") else: remote_default = None print("GDrive:", gdrive_root) print("refresh:", local, remote_default) remote = getParamDefault(params, "remote", remote_default) files = refresh_files(remote, local) response = {"refreshed_files":files} except Exception as ex: response = error_response(ex) else: response = disallowed_response("No connection to remote library") return JsonResponse(response) def zip(request): try: params = request.GET abs_uri = request.build_absolute_uri() protocol, uri = abs_uri.split("://") site = protocol+"://"+uri.split("/")[0]+"/" folders = getParamDefault(params, "folders", "templates,flows,transforms,test_data,branding") zip_file_name = getParamDefault(params, "name", "backup") target_dir = os.path.join(get_working_dir(),local_root) zip_file_name = zip_local_dirs(target_dir, zip_file_name, selected_subdirs = folders.split(",")) link = site+"file/?name="+zip_file_name.split(os.path.sep)[-1]+"&path=." response = {"zip_files":zip_file_name, "link":link} except Exception as ex: response = error_response(ex) return JsonResponse(response) def download_zip(request): try: params = request.GET abs_uri = request.build_absolute_uri() protocol, uri = abs_uri.split("://") site = protocol+"://"+uri.split("/")[0]+"/" folders = getParamDefault(params, "folders", "templates,flows,transforms,test_data,branding") zip_file_name = getParamDefault(params, "name", "backup") target_dir = os.path.join(get_working_dir(),local_root) zip_file_full = zip_local_dirs(target_dir, zip_file_name, selected_subdirs = folders.split(",")) zip_file_name = os.path.split(zip_file_full)[1] link = site+"file/?name="+zip_file_full.split(os.path.sep)[-1]+"&path=." 
response = {"zip_files":zip_file_full, "link":link} except Exception as ex: response = error_response(ex) file = open(zip_file_full, 'rb') response = HttpResponse(file, content_type='application/zip') response['Content-Disposition'] = "attachment; filename={}".format(zip_file_name) return response # return JsonResponse(response) @csrf_exempt def upload_zip(request): form = UploadZipForm(request.POST, request.FILES) target = os.path.join(get_working_dir(),local_root,request.FILES['file']._name) handle_uploaded_zip(request.FILES['file'], target) return JsonResponse({"file":target}) def handle_uploaded_zip(f, target): with open(target, 'wb+') as destination: for chunk in f.chunks(): destination.write(chunk) zfile = zipfile.ZipFile(target) zfile.extractall(os.path.join(get_working_dir()))
bsd-3-clause
-295,670,679,078,170,500
43.320285
238
0.648306
false
fumitoh/modelx
modelx/tests/testdata/pandas_compat/fastlife/Projection/Policy/__init__.py
1
10940
"""Source module to create ``Policy`` space from. This module is a source module to create ``Policy`` space and its sub spaces from. The formulas of the cells in the ``Policy`` space are created from the functions defined in this module. The ``Policy`` space is the base space of the policy spaces for individual policies, which are derived from and belong to the ``Policy`` space as its dynamic child spaces. The policy spaces for individual policies are parametrized by ``PolicyID``. For example, to get the policy space of the policy whose ID is 171:: >> pol = model.Policy(171) The cells in a policy space for each individual policy retrieve input data, calculate and hold values of policy attributes specific to that policy, so various spaces in :mod:`Input<simplelife.build_input>` must be accessible from the ``Policy`` space. .. rubric:: Projects This module is included in the following projects. * :mod:`simplelife` * :mod:`nestedlife` * :mod:`ifrs17sim` * :mod:`solvency2` .. rubric:: Space Parameters Attributes: PolicyID: Policy ID .. rubric:: References in Base Attributes: PolicyData: Input.PolicyData ProductSpec: Input.ProductSpec LifeTable: LifeTable Gen: Generation key .. rubric:: References in Sub Attributes: Product: Product key PolicyType: Policy type key Gen: Generation key Channel: Channel key Sex: ``M`` for Male, ``F`` for Female Duration: Number of years lapsed. 0 for new business IssueAge: Issue age PremFreq: Number of premium payments per year. 12 for monthly payments PolicyTerm: Policy term in year PolicyCount: Number of policies SumAssured: Sum Assured per policy """ from modelx.serialize.jsonvalues import * _formula = None _bases = [] _allow_none = None _spaces = [] # --------------------------------------------------------------------------- # Cells def AnnPremRate(): """Annualized Premium Rate per Sum Assured""" return GrossPremRate() * PremFreq().where(PremFreq() != 0, other=1/10) def CashValueRate(t): """Cash Value Rate per Sum Assured""" return np.maximum(ReserveNLP_Rate('PREM', t) - SurrCharge(t), 0) def GrossPremRate(): """Gross Premium Rate per Sum Assured per payment""" data = pd.concat([PolicyData, LoadAcqSA(), LoadMaintPrem(), LoadMaintPrem2(), LoadMaintSA(), LoadMaintSA2(), IntRate('PREM'), TableID('PREM')], axis=1) def get_value(pol): prod = pol['Product'] alpha = pol['LoadAcqSA'] beta = pol['LoadMaintPrem'] delta = pol['LoadMaintPrem2'] gamma = pol['LoadMaintSA'] gamma2 = pol['LoadMaintSA2'] freq = pol['PremFreq'] x, n, m = pol['IssueAge'], pol['PolicyTerm'], pol['PolicyTerm'] comf = LifeTable[pol['Sex'], pol['IntRate_PREM'], pol['TableID_PREM']] if prod == 'TERM' or prod == 'WL': return (comf.Axn(x, n) + alpha + gamma * comf.AnnDuenx(x, n, freq) + gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / (1-beta-delta) / freq / comf.AnnDuenx(x, m, freq) elif prod == 'ENDW': return (comf.Exn(x, n) + comf.Axn(x, n) + alpha + gamma * comf.AnnDuenx(x, n, freq) + gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / (1-beta-delta) / freq / comf.AnnDuenx(x, m, freq) else: raise ValueError('invalid product') result = data.apply(get_value, axis=1) result.name = 'GrossPremRate' return result def GrossPremTable(): """Gross premium table""" return None def InitSurrCharge(): """Initial Surrender Charge Rate""" def get_value(pol): prod, polt, gen = pol['Product'], pol['PolicyType'], pol['Gen'] term = pol['PolicyTerm'] param1 = SpecLookup.match("SurrChargeParam1", prod, polt, gen).value param2 = SpecLookup.match("SurrChargeParam2", prod, polt, gen).value if param1 is None or param2 is None: raise 
ValueError('SurrChargeParam not found') return param1 + param2 * min(term / 10, 1) result = PolicyData.apply(get_value, axis=1) result.name = 'InitSurrCharge' return result def IntRate(RateBasis): """Interest Rate""" if RateBasis == 'PREM': basis = 'IntRatePrem' elif RateBasis == 'VAL': basis = 'IntRateVal' else: raise ValueError('invalid RateBasis') def get_value(pol): result = SpecLookup.match(basis, pol["Product"], pol["PolicyType"], pol["Gen"]).value if result is not None: return result else: raise ValueError('lookup failed') result = PolicyData.apply(get_value, axis=1) result.name = 'IntRate_' + RateBasis return result def LoadAcqSA(): """Acquisition Loading per Sum Assured""" param1 = Product().apply(lambda prod: SpecLookup("LoadAcqSAParam1", prod)) param2 = Product().apply(lambda prod: SpecLookup("LoadAcqSAParam2", prod)) result = param1 + param2 * np.minimum(PolicyTerm() / 10, 1) result.name = 'LoadAcqSA' return result def LoadMaintPrem(): """Maintenance Loading per Gross Premium""" def get_value(pol): if SpecLookup("LoadMaintPremParam1", pol["Product"]) is not None: return SpecLookup("LoadMaintPremParam1", pol["Product"]) elif SpecLookup("LoadMaintPremParam2", pol["Product"]) is not None: param = SpecLookup("LoadMaintPremParam2", pol["Product"]) return (param + min(10, pol["PolicyTerm"])) / 100 else: raise ValueError('LoadMaintPrem parameters not found') result = PolicyData.apply(get_value, axis=1) result.name = 'LoadMaintPrem' return result def LoadMaintSA(): """Maintenance Loading per Sum Assured during Premium Payment""" def get_value(pol): result = SpecLookup.match("LoadMaintSA", pol["Product"], pol["PolicyType"], pol["Gen"]).value if result is not None: return result else: raise ValueError('lookup failed') result = PolicyData.apply(get_value, axis=1) result.name = 'LoadMaintSA' return result def LoadMaintSA2(): """Maintenance Loading per Sum Assured after Premium Payment""" def get_value(pol): result = SpecLookup.match("LoadMaintSA2", pol["Product"], pol["PolicyType"], pol["Gen"]).value if result is not None: return result else: raise ValueError('lookup failed') result = PolicyData.apply(get_value, axis=1) result.name = 'LoadMaintSA2' return result def NetPremRate(basis): """Net Premium Rate""" data = pd.concat([PolicyData, LoadMaintSA2(), IntRate(basis), TableID(basis)], axis=1) def get_value(pol): prod = pol['Product'] gamma2 = pol['LoadMaintSA2'] x, n, m = pol['IssueAge'], pol['PolicyTerm'], pol['PolicyTerm'] comf = LifeTable[pol['Sex'], pol['IntRate_' + basis], pol['TableID_' + basis]] if prod == 'TERM' or prod == 'WL': return (comf.Axn(x, n) + gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / comf.AnnDuenx(x, n) elif prod == 'ENDW': return (comf.Axn(x, n) + gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / comf.AnnDuenx(x, n) else: raise ValueError('invalid product') result = data.apply(get_value, axis=1) result.name = 'NetPremRate_' + basis return result def ReserveNLP_Rate(basis, t): """Net level premium reserve rate""" data = pd.concat([PolicyData, LoadMaintSA2(), IntRate(basis), TableID(basis), NetPremRate(basis)], axis=1) def get_value(pol): prod = pol['Product'] gamma2 = pol['LoadMaintSA2'] netp = pol['NetPremRate_' + basis] x, n, m = pol['IssueAge'], pol['PolicyTerm'], pol['PolicyTerm'] lt = LifeTable[pol['Sex'], pol['IntRate_' + basis], pol['TableID_' + basis]] if t <= m: return lt.Axn(x+t, n-t) + (gamma2 * lt.AnnDuenx(x+t, n-m, 1, m-t) - netp * lt.AnnDuenx(x+t, m-t)) elif t <=n: return lt.Axn(x+t, n-t) + gamma2 * lt.AnnDuenx(x+t, n-m, 1, m-t) else: return 0 result = 
data.apply(get_value, axis=1) result.name = 'ReserveNLP_Rate' return result def ReserveRate(): """Valuation Reserve Rate per Sum Assured""" return None def SurrCharge(t): """Surrender Charge Rate per Sum Assured""" m = PremTerm() return InitSurrCharge * np.maximum((np.minimum(m, 10) - t) / np.minimum(m, 10), 0) def TableID(RateBasis): """Mortality Table ID""" if RateBasis == 'PREM': basis = "MortTablePrem" elif RateBasis == 'VAL': basis = "MortTableVal" else: raise ValueError('invalid RateBasis') def get_value(pol): result = SpecLookup.match(basis, pol["Product"], pol["PolicyType"], pol["Gen"]).value if result is not None: return result else: raise ValueError('lookup failed') result = PolicyData.apply(get_value, axis=1) result.name = 'TableID_' + RateBasis return result def UernPremRate(): """Unearned Premium Rate""" return None Product = lambda: PolicyData['Product'] PolicyType = lambda: PolicyData['PolicyType'] Gen = lambda: PolicyData['Gen'] Channel = lambda: PolicyData['Channel'] Sex = lambda: PolicyData['Sex'] Duration = lambda: PolicyData['Duration'] IssueAge = lambda: PolicyData['IssueAge'] PremFreq = lambda: PolicyData['PremFreq'] PolicyTerm = lambda: PolicyData['PolicyTerm'] PolicyCount = lambda: PolicyData['PolicyCount'] SumAssured = lambda: PolicyData['SumAssured'] def LoadMaintPrem2(): """Maintenance Loading per Gross Premium for Premium Waiver""" result = pd.Series(0.002, index=PolicyData.index) result[PremTerm < 10] = 0.001 result[PremTerm < 5] = 0.0005 result.name = 'LoadMaintPrem2' return result # --------------------------------------------------------------------------- # References LifeTable = ("Interface", ("...", "LifeTable"), "auto") PolicyData = ("Pickle", 2310405372040) SpecLookup = ("Interface", ("...", "Input", "SpecLookup"), "auto") PremTerm = ("Interface", (".", "PolicyTerm"), "auto")
gpl-3.0
-2,055,454,818,524,421,000
25.300481
111
0.577148
false
lmazuel/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/connectivity_parameters_py3.py
1
1429
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class ConnectivityParameters(Model):
    """Parameters that determine how the connectivity check will be performed.

    All required parameters must be populated in order to send to Azure.

    :param source: Required.
    :type source: ~azure.mgmt.network.v2017_11_01.models.ConnectivitySource
    :param destination: Required.
    :type destination:
     ~azure.mgmt.network.v2017_11_01.models.ConnectivityDestination
    """

    _validation = {
        'source': {'required': True},
        'destination': {'required': True},
    }

    _attribute_map = {
        'source': {'key': 'source', 'type': 'ConnectivitySource'},
        'destination': {'key': 'destination', 'type': 'ConnectivityDestination'},
    }

    def __init__(self, *, source, destination, **kwargs) -> None:
        super(ConnectivityParameters, self).__init__(**kwargs)
        self.source = source
        self.destination = destination
mit
-6,371,394,010,488,108,000
34.725
81
0.610917
false
projectshift/shift-memory
setup.py
1
2472
#!/usr/bin/env python
import os
from setuptools import setup, find_packages

# ----------------------------------------------------------------------------
# Building
#
# Create source distribution:
# ./setup.py sdist
#
#
# Create binary distribution (non-univeral, python 3 only):
# ./setup.py bdist_wheel --python-tag=py3
#
# Register on PyPI:
# twine register dist/mypkg.whl
#
#
# Upload to PyPI:
# twine upload dist/*
#
# ----------------------------------------------------------------------------

# project version
version = '0.0.8'

# development status
# dev_status = '1 - Planning'
# dev_status = '2 - Pre-Alpha'
dev_status = '3 - Alpha'
# dev_status = '4 - Beta'
# dev_status = '5 - Production/Stable'
# dev_status = '6 - Mature'
# dev_status = '7 - Inactive'

# github repository url
repo = 'https://github.com/projectshift/shift-memory'
license_type = 'MIT License'

# monkey patch os for vagrant hardlinks
del os.link

# run setup
setup(**dict(

    # author
    author='Dmitry Belyakov',
    author_email='[email protected]',

    # project meta
    name='shiftmemory',
    version=version,
    url=repo,
    download_url=repo + '/archive/v' + version + '.tar.gz',
    description='Python3 cache library',
    keywords=[
        'python3',
        'cache',
        'redis',
    ],

    # classifiers
    # see: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[

        # maturity
        'Development Status :: ' + dev_status,

        # license
        'License :: OSI Approved :: ' + license_type,

        # audience
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',

        # pythons
        'Programming Language :: Python :: 3',

        # categories
        'Environment :: Console',
        'Environment :: Web Environment',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities'
    ],

    # project packages
    packages=find_packages(exclude=['tests*']),

    # include none-code data files from manifest.in (http://goo.gl/Uf0Yxc)
    include_package_data=True,

    # project dependencies
    install_requires=[
        'click>=7.0,<8.0',
        'redis>=3.2.1,<4.0.0',
        'hiredis>=1.0.0,<2.0.0',
        'arrow>=0.13.1,<1.0.0'
    ],

    # entry points
    entry_points=dict(
        console_scripts=[
            'shiftmemory = shiftmemory.cli.console:cli'
        ]
    ),

    # project license
    license=license_type
))
mit
-3,527,569,901,544,307,700
21.472727
78
0.565534
false
lyst/lightfm
doc/conf.py
1
9601
# -*- coding: utf-8 -*-
#
# LightFM documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 21 12:26:52 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

import lightfm

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'LightFM'
copyright = u'2016, Lyst (Maciej Kula)'
author = u'Lyst (Maciej Kula)'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = lightfm.__version__
# The full version, including alpha/beta/rc tags.
release = lightfm.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'LightFM v1.8'

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'LightFMdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'LightFM.tex', u'LightFM Documentation',
     u'Lyst (Maciej Kula)', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'lightfm', u'LightFM Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'LightFM', u'LightFM Documentation',
     author, 'LightFM', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


# Compact attribute lists
napoleon_use_ivar = True
apache-2.0
-3,776,953,285,276,997,600
31.326599
80
0.706385
false
aranzgeo/properties
docs/conf.py
1
10385
# -*- coding: utf-8 -*-
#
# properties documentation build configuration file, created by
# sphinx-quickstart on Thu May 12 10:22:17 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
# import properties

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# sys.path.append(os.path.abspath('./_ext'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'properties'
copyright = u'2018, Seequent'
author = u'Seequent'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.6.1'
# The full version, including alpha/beta/rc tags.
release = u'0.6.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
try:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    pass
except Exception:
    html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# html_sidebar
html_sidebars = {
    '**': [
        'globaltoc.html',
        'searchbox.html',
    ],
}

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
exclude_patterns = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'propertiesdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'properties.tex', u'properties Documentation',
     u'Seequent', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'properties', u'properties Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'properties', u'properties Documentation',
     author, 'properties', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'https://docs.python.org/3/': None,
    'https://docs.scipy.org/doc/numpy/': None,
    'http://vectormath.readthedocs.io/en/latest/': None,
}

linkcheck_ignore = ['https://docs.scipy.org/doc/numpy/reference/']
linkcheck_retries = 10

import sphinx.environment
from docutils.utils import get_source_line


def _supress_nonlocal_image_warn(self, msg, node, **kwargs):
    if not msg.startswith('nonlocal image URI found:'):
        self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs)


sphinx.environment.BuildEnvironment.warn_node = _supress_nonlocal_image_warn
mit
-6,904,015,504,437,077,000
30.75841
79
0.702744
false